Merge branch 'master' into cl/migrate-parachains-paras-benchmarking-to-v2
bkchr authored Dec 12, 2024
2 parents ca9cf63 + 459b4a6 commit a49edf3
Showing 782 changed files with 25,168 additions and 12,217 deletions.
1 change: 0 additions & 1 deletion .config/nextest.toml
@@ -21,7 +21,6 @@ retries = 5
# The number of threads to run tests with. Supported values are either an integer or
# the string "num-cpus". Can be overridden through the `--test-threads` option.
# test-threads = "num-cpus"

test-threads = 20

# The number of threads required for each test. This is generally used in overrides to
28 changes: 28 additions & 0 deletions .github/actions/workflow-stopper/action.yml
@@ -0,0 +1,28 @@
name: "stop all workflows"
description: "Action stops all workflows in a PR to save compute resources."
inputs:
app-id:
description: "App id"
required: true
app-key:
description: "App token"
required: true
runs:
using: "composite"
steps:
- name: Workflow stopper - Generate token
uses: actions/create-github-app-token@v1
id: app-token
with:
app-id: ${{ inputs.app-id }}
private-key: ${{ inputs.app-key }}
owner: "paritytech"
repositories: "workflow-stopper"
- name: Workflow stopper - Stop all workflows
uses: octokit/[email protected]
with:
route: POST /repos/paritytech/workflow-stopper/actions/workflows/stopper.yml/dispatches
ref: main
inputs: '${{ format(''{{ "github_sha": "{0}", "github_repository": "{1}", "github_ref_name": "{2}", "github_workflow_id": "{3}", "github_job_name": "{4}" }}'', github.event.pull_request.head.sha, github.repository, github.ref_name, github.run_id, github.job) }}'
env:
GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
2 changes: 1 addition & 1 deletion .github/env
@@ -1 +1 @@
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034"
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
195 changes: 189 additions & 6 deletions .github/scripts/cmd/cmd.py
@@ -58,7 +58,7 @@ def setup_logging():
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''

parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)

for arg, config in common_args.items():
parser_bench.add_argument(arg, **config)
@@ -67,6 +67,35 @@ def setup_logging():
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')


"""
BENCH OMNI
"""

bench_example = '''**Examples**:
Runs all benchmarks
%(prog)s
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR except reactions
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
%(prog)s --runtime westend --fail-fast
Outputs nothing and cleans up the previous bot and author command-triggering comments in the PR
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''

parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)

for arg, config in common_args.items():
parser_bench_old.add_argument(arg, **config)

parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
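# Illustrative invocation of the `bench-omni` sub-command wired up above
# (runtime and pallet names are examples only):
#   cmd.py bench-omni --runtime westend --pallet pallet_balances --fail-fast
# parses to roughly Namespace(command='bench-omni', runtime=['westend'],
# pallet=['pallet_balances'], fail_fast=True), plus whatever common_args adds.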


"""
FMT
"""
@@ -98,12 +127,12 @@ def main():

print(f'args: {args}')

if args.command == 'bench':
if args.command == 'bench-omni':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}

profile = "release"
profile = "production"

print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
@@ -113,11 +142,22 @@ def main():

# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}")
build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}"
print(f'-- building "{runtime["name"]}" with `{build_command}`')
os.system(build_command)
print(f'-- listing pallets for benchmark for {runtime["name"]}')
wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
output = os.popen(
f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file} {runtime['bench_flags']}").read()
list_command = f"frame-omni-bencher v1 benchmark pallet " \
f"--no-csv-header " \
f"--no-storage-info " \
f"--no-min-squares " \
f"--no-median-slopes " \
f"--all " \
f"--list " \
f"--runtime={wasm_file} " \
f"{runtime['bench_flags']}"
print(f'-- running: {list_command}')
output = os.popen(list_command).read()
raw_pallets = output.strip().split('\n')

all_pallets = set()
@@ -230,6 +270,149 @@ def main():
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')

if args.command == 'bench':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}

profile = "production"

print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
runtimesMatrix = {x['name']: x for x in runtimesMatrix}
print(f'Filtered out runtimes: {runtimesMatrix}')

# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked"
print(f'-- building {runtime["name"]} with `{build_command}`')
os.system(build_command)

chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev"

machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}"
print(f"Running machine test for `{machine_test}`")
os.system(machine_test)
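# `benchmark machine` is assumed here to be a quick sanity check of the host
# against the reference hardware; its output is only printed and its exit
# status is not checked before moving on to pallet listing.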

print(f'-- listing pallets for benchmark for {chain}')
list_command = f"target/{profile}/{runtime['old_bin']} " \
f"benchmark pallet " \
f"--no-csv-header " \
f"--no-storage-info " \
f"--no-min-squares " \
f"--no-median-slopes " \
f"--all " \
f"--list " \
f"--chain={chain}"
print(f'-- running: {list_command}')
output = os.popen(list_command).read()
raw_pallets = output.strip().split('\n')

all_pallets = set()
for pallet in raw_pallets:
if pallet:
all_pallets.add(pallet.split(',')[0].strip())

pallets = list(all_pallets)
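# The `--list` output is assumed to be CSV-like "pallet, benchmark" rows
# (no header row because of --no-csv-header), e.g.
#   pallet_balances, transfer_allow_death
#   pallet_balances, transfer_keep_alive
# so taking column 0 and collecting into a set leaves one entry per pallet.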
print(f'Pallets in {runtime["name"]}: {pallets}')
runtime_pallets_map[runtime['name']] = pallets

print(f'\n')

# filter out only the specified pallets from collected runtimes/pallets
if args.pallet:
print(f'Pallets: {args.pallet}')
new_pallets_map = {}
# keep only specified pallets if they exist in the runtime
for runtime in runtime_pallets_map:
if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
new_pallets_map[runtime] = args.pallet

runtime_pallets_map = new_pallets_map

print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')
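# Filtering sketch: with `--pallet pallet_balances pallet_multisig`, a runtime
# survives only if its collected pallet list contains *both* requested pallets
# (subset check), and each surviving runtime is mapped to exactly the requested
# pallets instead of its full list.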

if not runtime_pallets_map:
if args.pallet and not args.runtime:
print(f"No pallets {args.pallet} found in any runtime")
elif args.runtime and not args.pallet:
print(f"{args.runtime} runtime does not have any pallets")
elif args.runtime and args.pallet:
print(f"No pallets {args.pallet} found in {args.runtime}")
else:
print('No runtimes found')
sys.exit(1)

for runtime in runtime_pallets_map:
for pallet in runtime_pallets_map[runtime]:
config = runtimesMatrix[runtime]
header_path = os.path.abspath(config['header'])
template = None

chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev"

print(f'-- config: {config}')
if runtime == 'dev':
# to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
print(f'-- running: {search_manifest_path}')
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
if args.fail_fast:
print_and_log(f'Error: {pallet} not found in dev runtime')
sys.exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
output_path = os.path.join(package_dir, "src", "weights.rs")
template = config['template']
else:
default_path = f"./{config['path']}/src/weights"
xcm_path = f"./{config['path']}/src/weights/xcm"
output_path = default_path
if pallet.startswith("pallet_xcm_benchmarks"):
template = config['template']
output_path = xcm_path
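# Output locations resolved above (paths illustrative): for the dev runtime the
# weights file lands next to the pallet itself, i.e. <package_dir>/src/weights.rs
# found via `cargo metadata` piped through jq; for all other runtimes it goes to
# ./<runtime path>/src/weights, or .../weights/xcm plus the runtime's template
# when the pallet is a pallet_xcm_benchmarks one.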

print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \
f"--extrinsic=* " \
f"--chain={chain} " \
f"--pallet={pallet} " \
f"--header={header_path} " \
f"--output={output_path} " \
f"--wasm-execution=compiled " \
f"--steps=50 " \
f"--repeat=20 " \
f"--heap-pages=4096 " \
f"{f'--template={template} ' if template else ''}" \
f"--no-storage-info --no-min-squares --no-median-slopes "
print(f'-- Running: {cmd} \n')
status = os.system(cmd)
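# For a hypothetical westend / pallet_balances pair the assembled command would
# look roughly like:
#   target/production/<old_bin> benchmark pallet --extrinsic=* --chain=westend-dev \
#     --pallet=pallet_balances --header=<abs header path> --output=<weights dir> \
#     --wasm-execution=compiled --steps=50 --repeat=20 --heap-pages=4096 \
#     --no-storage-info --no-min-squares --no-median-slopes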

if status != 0 and args.fail_fast:
print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
sys.exit(1)

# Otherwise collect failed benchmarks and print them at the end
# push failed pallets to failed_benchmarks
if status != 0:
failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
else:
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

if failed_benchmarks:
print_and_log('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')

if successful_benchmarks:
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')

elif args.command == 'fmt':
command = f"cargo +nightly fmt"