#!/usr/bin/env python3
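"""Benchmark runner: measures the configured graph algorithms across the
SPLA, LAGraph, Gunrock and GraphBLAST drivers on the datasets listed in
config.BENCHMARK_DATASETS.

Illustrative invocations (the accepted --algo/--tool/--format spellings come
from the AlgorithmName, ToolName and OutputFormat enums, so the concrete
values shown here are assumptions, not guaranteed names):

    ./benchmark.py                                          # all algorithms, all tools
    ./benchmark.py --tool spla --format csv --output results.csv
"""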
import argparse
from typing import List
import config
import lib.util as util
from lib.algorithm import AlgorithmName
from lib.tool import ToolName
from lib.dataset import Dataset
from lib.benchmark_summary import BenchmarkSummary, OutputFormat, ResultsPrinter
from drivers.driver_graphblast import DriverGraphBLAST
from drivers.driver_gunrock import DriverGunrock
from drivers.driver_lagraph import DriverLaGraph
from drivers.driver_spla import DriverSpla
from drivers.driver import Driver


def tool_to_driver(tool: ToolName) -> Driver:
    # Map each supported tool to the constructor of its benchmark driver.
    drivers = {
        ToolName.spla: DriverSpla,
        ToolName.lagraph: DriverLaGraph,
        ToolName.gunrock: DriverGunrock,
        ToolName.graphblast: DriverGraphBLAST,
    }
    return drivers[tool]()


def main():
    parser = argparse.ArgumentParser(
        description='Benchmarking tool for the graph algorithms')
    parser.add_argument('--algo',
                        type=AlgorithmName,
                        choices=list(AlgorithmName),
                        help='Select algorithm to run (otherwise all algorithms are benchmarked)')
    parser.add_argument('--tool',
                        type=ToolName,
                        choices=list(ToolName),
                        help='Select tool to use (otherwise all tools are benchmarked)')
    parser.add_argument('--output',
                        default=config.BENCHMARK_OUTPUT,
                        help='File to dump benchmark results')
    parser.add_argument('--format',
                        type=OutputFormat,
                        choices=list(OutputFormat),
                        default=OutputFormat.csv,
                        help='Format to dump benchmark results')
    parser.add_argument('--printer',
                        type=ResultsPrinter,
                        choices=list(ResultsPrinter),
                        default=ResultsPrinter.all,
                        help='Measurement printer')
    args = parser.parse_args()

    # Benchmark either the selected tool or all of them. A concrete list is
    # required here (not a lazy map object), because the drivers are iterated
    # once per algorithm/dataset combination below.
    drivers: List[Driver] = []
    if args.tool is None:
        drivers = list(map(tool_to_driver, ToolName))
    else:
        drivers = [tool_to_driver(args.tool)]

    # Benchmark either the selected algorithm or all of them.
    algorithms: List[AlgorithmName] = []
    if args.algo is None:
        algorithms = list(AlgorithmName)
    else:
        algorithms = [args.algo]

    def print_status(status: str, *args):
        util.print_status('benchmark', status, *args)

    summary = BenchmarkSummary()
    try:
        for dataset_name in config.BENCHMARK_DATASETS:
            print_status(f'dataset {dataset_name}', 'start preparation')
            dataset = Dataset(dataset_name)
            print_status(f'dataset {dataset_name}', 'finish preparation')
            for algo in algorithms:
                status_algo_dataset = f'algo: {algo}, dataset: {dataset.name}'
                print_status(status_algo_dataset,
                             'check if all tools can be used')
                all_can_run = all(map(
                    lambda driver: driver.can_run(dataset, algo), drivers
                ))
                if not all_can_run:
                    print_status(status_algo_dataset,
                                 'not runnable on some drivers, skipping')
                    continue
                print_status(status_algo_dataset, 'start benchmarking')
                for driver in drivers:
                    status = f'algo: {algo}, dataset: {dataset.name}, tool: {str(driver.tool_name())}'
                    print_status(status, 'start benchmarking')
                    result = driver.run(dataset, algo)
                    print_status(status, 'finish benchmarking')
                    summary.add_measurement(
                        driver.tool_name(), dataset, algo, result)
                print_status(status_algo_dataset, 'finish benchmarking')
    finally:
        # Dump whatever has been measured so far, even if a run failed.
        summary.dump(args.format, args.output, args.printer)


if __name__ == '__main__':
    main()