Commit

Merge branch 'master' into migrating-polkadot-runtime-common-auctions-benchmarking-to-v2
re-gius authored Dec 10, 2024
2 parents fccd816 + 65a4e5e commit ca25bff
Showing 623 changed files with 16,439 additions and 6,550 deletions.
1 change: 0 additions & 1 deletion .config/nextest.toml
@@ -21,7 +21,6 @@ retries = 5
# The number of threads to run tests with. Supported values are either an integer or
# the string "num-cpus". Can be overridden through the `--test-threads` option.
# test-threads = "num-cpus"

test-threads = 20

# The number of threads required for each test. This is generally used in overrides to
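As the comment above notes, the configured value can still be overridden per invocation. A minimal sketch (assuming `cargo-nextest` is installed and that the CLI flag accepts the same values as the config key) of forcing the override from a helper script:

import subprocess

# Override the test-threads value pinned in .config/nextest.toml for one run.
subprocess.run(["cargo", "nextest", "run", "--test-threads", "num-cpus"], check=False)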
2 changes: 1 addition & 1 deletion .github/env
@@ -1 +1 @@
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034"
IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
195 changes: 189 additions & 6 deletions .github/scripts/cmd/cmd.py
@@ -58,7 +58,7 @@ def setup_logging():
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''

parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)

for arg, config in common_args.items():
parser_bench.add_argument(arg, **config)
@@ -67,6 +67,35 @@ def setup_logging():
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')


"""
BENCH OMNI
"""

bench_example = '''**Examples**:
Runs all benchmarks
%(prog)s
    Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR except reactions
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
%(prog)s --runtime westend --fail-fast
    Does not output anything and cleans up the previous bot's and author's command-triggering comments in the PR
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''

parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)

for arg, config in common_args.items():
parser_bench_old.add_argument(arg, **config)

parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')


"""
FMT
"""
@@ -98,12 +127,12 @@ def main():

print(f'args: {args}')

if args.command == 'bench':
if args.command == 'bench-omni':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}

profile = "release"
profile = "production"

print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
@@ -113,11 +142,22 @@ def main():

# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}")
build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}"
print(f'-- building "{runtime["name"]}" with `{build_command}`')
os.system(build_command)
print(f'-- listing pallets for benchmark for {runtime["name"]}')
wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
output = os.popen(
f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file} {runtime['bench_flags']}").read()
list_command = f"frame-omni-bencher v1 benchmark pallet " \
f"--no-csv-header " \
f"--no-storage-info " \
f"--no-min-squares " \
f"--no-median-slopes " \
f"--all " \
f"--list " \
f"--runtime={wasm_file} " \
f"{runtime['bench_flags']}"
print(f'-- running: {list_command}')
output = os.popen(list_command).read()
raw_pallets = output.strip().split('\n')

all_pallets = set()
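For context, the `--list` output consumed here is assumed to be one `pallet, extrinsic` pair per line; deduplicating on the first column (as the loop that follows in the full file does) yields the pallet names:

# Assumed sample shape of `frame-omni-bencher ... --list` output.
output = """pallet_balances, transfer_allow_death
pallet_balances, force_transfer
pallet_multisig, as_multi_create"""

all_pallets = set()
for line in output.strip().split('\n'):
    if line:
        all_pallets.add(line.split(',')[0].strip())

print(sorted(all_pallets))  # ['pallet_balances', 'pallet_multisig']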
@@ -230,6 +270,149 @@ def main():
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')

if args.command == 'bench':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}

profile = "production"

print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
runtimesMatrix = {x['name']: x for x in runtimesMatrix}
print(f'Filtered out runtimes: {runtimesMatrix}')

# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked"
print(f'-- building {runtime["name"]} with `{build_command}`')
os.system(build_command)

chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev"

machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}"
print(f"Running machine test for `{machine_test}`")
os.system(machine_test)

print(f'-- listing pallets for benchmark for {chain}')
list_command = f"target/{profile}/{runtime['old_bin']} " \
f"benchmark pallet " \
f"--no-csv-header " \
f"--no-storage-info " \
f"--no-min-squares " \
f"--no-median-slopes " \
f"--all " \
f"--list " \
f"--chain={chain}"
print(f'-- running: {list_command}')
output = os.popen(list_command).read()
raw_pallets = output.strip().split('\n')

all_pallets = set()
for pallet in raw_pallets:
if pallet:
all_pallets.add(pallet.split(',')[0].strip())

pallets = list(all_pallets)
print(f'Pallets in {runtime["name"]}: {pallets}')
runtime_pallets_map[runtime['name']] = pallets

print(f'\n')

# filter out only the specified pallets from collected runtimes/pallets
if args.pallet:
print(f'Pallets: {args.pallet}')
new_pallets_map = {}
# keep only specified pallets if they exist in the runtime
for runtime in runtime_pallets_map:
if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
new_pallets_map[runtime] = args.pallet

runtime_pallets_map = new_pallets_map

print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')

if not runtime_pallets_map:
if args.pallet and not args.runtime:
print(f"No pallets {args.pallet} found in any runtime")
elif args.runtime and not args.pallet:
print(f"{args.runtime} runtime does not have any pallets")
elif args.runtime and args.pallet:
print(f"No pallets {args.pallet} found in {args.runtime}")
else:
print('No runtimes found')
sys.exit(1)

for runtime in runtime_pallets_map:
for pallet in runtime_pallets_map[runtime]:
config = runtimesMatrix[runtime]
header_path = os.path.abspath(config['header'])
template = None

chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev"

print(f'-- config: {config}')
if runtime == 'dev':
# to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
print(f'-- running: {search_manifest_path}')
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
if args.fail_fast:
print_and_log(f'Error: {pallet} not found in dev runtime')
sys.exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
output_path = os.path.join(package_dir, "src", "weights.rs")
template = config['template']
else:
default_path = f"./{config['path']}/src/weights"
xcm_path = f"./{config['path']}/src/weights/xcm"
output_path = default_path
if pallet.startswith("pallet_xcm_benchmarks"):
template = config['template']
output_path = xcm_path

print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \
f"--extrinsic=* " \
f"--chain={chain} " \
f"--pallet={pallet} " \
f"--header={header_path} " \
f"--output={output_path} " \
f"--wasm-execution=compiled " \
f"--steps=50 " \
f"--repeat=20 " \
f"--heap-pages=4096 " \
f"{f'--template={template} ' if template else ''}" \
f"--no-storage-info --no-min-squares --no-median-slopes "
print(f'-- Running: {cmd} \n')
status = os.system(cmd)

if status != 0 and args.fail_fast:
print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
sys.exit(1)

# Otherwise collect failed benchmarks and print them at the end
# push failed pallets to failed_benchmarks
if status != 0:
failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
else:
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]

if failed_benchmarks:
print_and_log('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')

if successful_benchmarks:
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')

elif args.command == 'fmt':
command = f"cargo +nightly fmt"
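The runtime/pallet filtering in both bench branches reduces to a subset test: a runtime survives only if it contains every requested pallet. A minimal sketch with hypothetical data:

runtime_pallets_map = {
    'westend': ['pallet_balances', 'pallet_staking', 'pallet_multisig'],
    'rococo': ['pallet_balances'],
}
requested = ['pallet_balances', 'pallet_staking']

# Keep only runtimes that contain every requested pallet, narrowed to exactly
# the requested list (mirrors the subset filter in main()).
filtered = {
    runtime: requested
    for runtime, pallets in runtime_pallets_map.items()
    if set(requested).issubset(set(pallets))
}
print(filtered)  # {'westend': ['pallet_balances', 'pallet_staking']}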
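Likewise, the dev-runtime manifest lookup condenses to a `cargo metadata` + `jq` pipeline that maps a pallet name to its crate directory; a sketch (pallet name hypothetical, requires `jq` on PATH):

import os

pallet = 'pallet_balances'
# `cargo metadata` emits workspace packages as JSON; jq picks the manifest path
# for the crate whose name matches the pallet (underscores become hyphens).
query = (
    "cargo metadata --locked --format-version 1 --no-deps "
    f"| jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
)
manifest_path = os.popen(query).read().strip()
weights_path = os.path.join(os.path.dirname(manifest_path), 'src', 'weights.rs')
print(weights_path)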
38 changes: 19 additions & 19 deletions .github/scripts/cmd/test_cmd.py
@@ -47,7 +47,7 @@

def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template = None):
return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \
f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \
f"--runtime=target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \
f"--pallet={pallets} --header={header} " \
f"--output={output_path} " \
f"--wasm-execution=compiled " \
@@ -93,7 +93,7 @@ def tearDown(self):

def test_bench_command_normal_execution_all_runtimes(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)),
pallet=['pallet_balances'],
fail_fast=True,
@@ -117,10 +117,10 @@ def test_bench_command_normal_execution_all_runtimes(self):

expected_calls = [
# Build calls
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"),

call(get_mock_bench_output(
runtime='kitchensink',
@@ -150,7 +150,7 @@ def test_bench_command_normal_execution_all_runtimes(self):

def test_bench_command_normal_execution(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['westend'],
pallet=['pallet_balances', 'pallet_staking'],
fail_fast=True,
@@ -170,7 +170,7 @@ def test_bench_command_normal_execution(self):

expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),

# Westend runtime calls
call(get_mock_bench_output(
@@ -193,7 +193,7 @@ def test_bench_command_normal_execution(self):

def test_bench_command_normal_execution_xcm(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['westend'],
pallet=['pallet_xcm_benchmarks::generic'],
fail_fast=True,
@@ -213,7 +213,7 @@ def test_bench_command_normal_execution_xcm(self):

expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),

# Westend runtime calls
call(get_mock_bench_output(
@@ -229,7 +229,7 @@ def test_bench_command_normal_execution_xcm(self):

def test_bench_command_two_runtimes_two_pallets(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['westend', 'rococo'],
pallet=['pallet_balances', 'pallet_staking'],
fail_fast=True,
@@ -250,8 +250,8 @@ def test_bench_command_two_runtimes_two_pallets(self):

expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"),
# Westend runtime calls
call(get_mock_bench_output(
runtime='westend',
@@ -287,7 +287,7 @@ def test_bench_command_two_runtimes_two_pallets(self):

def test_bench_command_one_dev_runtime(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['dev'],
pallet=['pallet_balances'],
fail_fast=True,
@@ -309,7 +309,7 @@ def test_bench_command_one_dev_runtime(self):

expected_calls = [
# Build calls
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
# Westend runtime calls
call(get_mock_bench_output(
runtime='kitchensink',
@@ -324,7 +324,7 @@ def test_bench_command_one_dev_runtime(self):

def test_bench_command_one_cumulus_runtime(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['asset-hub-westend'],
pallet=['pallet_assets'],
fail_fast=True,
@@ -344,7 +344,7 @@ def test_bench_command_one_cumulus_runtime(self):

expected_calls = [
# Build calls
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"),
# Asset-hub-westend runtime calls
call(get_mock_bench_output(
runtime='asset-hub-westend',
@@ -359,7 +359,7 @@ def test_bench_command_one_cumulus_runtime(self):

def test_bench_command_one_cumulus_runtime_xcm(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['asset-hub-westend'],
pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
fail_fast=True,
@@ -379,7 +379,7 @@ def test_bench_command_one_cumulus_runtime_xcm(self):

expected_calls = [
# Build calls
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"),
# Asset-hub-westend runtime calls
call(get_mock_bench_output(
runtime='asset-hub-westend',
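The suite is plain `unittest`, so it can be run with the standard discovery runner; a sketch from the repository root (path taken from the file header above, discovery flags assumed suitable):

import subprocess

# Discover and run the cmd.py test module with the stdlib runner.
subprocess.run(
    ["python3", "-m", "unittest", "discover", "-s", ".github/scripts/cmd", "-p", "test_cmd.py"],
    check=False,
)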