Don't forcefully write benchmark results on disk
It is better if you can store the result file wherever you want
just by editing the benchmark script.
soininen committed Apr 24, 2024
1 parent eddedca commit 29e89b0
Showing 7 changed files with 34 additions and 36 deletions.
13 changes: 8 additions & 5 deletions benchmarks/README.md
@@ -2,21 +2,24 @@
 
 This Python package contains performance benchmarks for `spinedb_api`.
 The benchmarks use [`pyperf`](https://pyperf.readthedocs.io/en/latest/index.html)
-which can be installed by installing the optional developer dependencies:
+which is installed as part of the optional developer dependencies:
 
 ```commandline
-python -mpip install .[dev]
+python -mpip install -e .[dev]
 ```
 
-Each Python file is an individual script
-that writes the run results into a common `.json` file.
+Each Python file is a self-contained script
+that benchmarks some aspect of the DB API.
+Benchmark results can be optionally written into a `.json` file
+by modifying the script.
+This may be handy for comparing different branches/commits/changes etc.
 The file can be inspected by
 
 ```commandline
 python -mpyperf show <benchmark file.json>
 ```
 
-Benchmarks from e.g. different commits/branches can be compared by
+Benchmark files from e.g. different commits/branches can be compared by
 
 ```commandline
 python -mpyperf compare_to <benchmark file 1.json> <benchmark file 2.json>
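Concretely, the comparison workflow the README describes now goes through the scripts themselves; a sketch, with illustrative file and branch names that are not part of this commit:

```commandline
# on branch main, after editing the script to call run_benchmark("main.json")
python benchmarks/map_from_database.py
# on the feature branch, after editing it to call run_benchmark("feature.json")
python benchmarks/map_from_database.py
python -mpyperf compare_to main.json feature.json
```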
9 changes: 4 additions & 5 deletions benchmarks/datetime_from_database.py
@@ -7,7 +7,6 @@
 from typing import Any, Sequence, Tuple
 import pyperf
 from spinedb_api import DateTime, from_database, to_database
-from benchmarks.utils import run_file_name
 
 
 def build_datetimes(count: int) -> Sequence[DateTime]:
@@ -41,16 +40,16 @@ def value_from_database(loops: int, db_values_and_types: Sequence[Tuple[Any, str
     return duration
 
 
-def run_benchmark():
-    file_name = run_file_name()
+def run_benchmark(file_name):
     runner = pyperf.Runner(loops=10)
     inner_loops = 1000
     db_values_and_types = [to_database(x) for x in build_datetimes(inner_loops)]
     benchmark = runner.bench_time_func(
         "from_database[DateTime]", value_from_database, db_values_and_types, inner_loops=inner_loops
     )
-    pyperf.add_runs(file_name, benchmark)
+    if file_name:
+        pyperf.add_runs(file_name, benchmark)
 
 
 if __name__ == "__main__":
-    run_benchmark()
+    run_benchmark("")
9 changes: 4 additions & 5 deletions benchmarks/map_from_database.py
@@ -5,7 +5,7 @@
 import time
 import pyperf
 from spinedb_api import from_database, to_database
-from benchmarks.utils import build_even_map, run_file_name
+from benchmarks.utils import build_even_map
 
 
 def value_from_database(loops, db_value, value_type):
@@ -17,8 +17,7 @@ def value_from_database(loops, db_value, value_type):
     return duration
 
 
-def run_benchmark():
-    file_name = run_file_name()
+def run_benchmark(file_name):
    runner = pyperf.Runner(loops=3)
    runs = {
        "value_from_database[Map(10, 10, 100)]": {"dimensions": (10, 10, 100)},
@@ -32,9 +31,9 @@ def run_benchmark():
            db_value,
            value_type,
        )
-        if benchmark is not None:
+        if file_name and benchmark is not None:
            pyperf.add_runs(file_name, benchmark)
 
 
 if __name__ == "__main__":
-    run_benchmark()
+    run_benchmark("")
13 changes: 6 additions & 7 deletions benchmarks/mapped_item_getitem.py
@@ -7,7 +7,6 @@
 from typing import Dict
 from spinedb_api import DatabaseMapping
 from spinedb_api.db_mapping_base import PublicItem
-from benchmarks.utils import run_file_name
 
 
 def use_subscript_operator(loops: int, items: PublicItem, field: Dict):
@@ -20,7 +19,7 @@ def use_subscript_operator(loops: int, items: PublicItem, field: Dict):
     return duration
 
 
-def run_benchmark():
+def run_benchmark(file_name):
    runner = pyperf.Runner()
    inner_loops = 1000
    object_class_names = [str(i) for i in range(inner_loops)]
@@ -52,11 +51,11 @@ def run_benchmark():
            inner_loops=inner_loops,
        ),
    ]
-    file_name = run_file_name()
-    for benchmark in benchmarks:
-        if benchmark is not None:
-            pyperf.add_runs(file_name, benchmark)
+    if file_name:
+        for benchmark in benchmarks:
+            if benchmark is not None:
+                pyperf.add_runs(file_name, benchmark)
 
 
 if __name__ == "__main__":
-    run_benchmark()
+    run_benchmark("")
8 changes: 4 additions & 4 deletions benchmarks/update_default_value_to_different_value.py
@@ -6,7 +6,7 @@
 import time
 import pyperf
 from spinedb_api import DatabaseMapping, to_database
-from benchmarks.utils import build_even_map, run_file_name
+from benchmarks.utils import build_even_map
 
 
 def update_default_value(loops, db_map, first_db_value, first_value_type, second_db_value, second_value_type):
@@ -27,7 +27,7 @@ def update_default_value(loops, db_map, first_db_value, first_value_type, second
     return total_time
 
 
-def run_benchmark():
+def run_benchmark(file_name: str):
    first_value, first_type = to_database(None)
    second_value, second_type = to_database(build_even_map())
    with DatabaseMapping("sqlite://", create=True) as db_map:
@@ -45,8 +45,8 @@
            second_value,
            second_type,
        )
-        pyperf.add_runs(run_file_name(), benchmark)
+        pyperf.add_runs(file_name, benchmark)
 
 
 if __name__ == "__main__":
-    run_benchmark()
+    run_benchmark("")
12 changes: 7 additions & 5 deletions benchmarks/update_default_value_to_same_value.py
@@ -3,12 +3,13 @@
 the default value is somewhat complex Map and the update does not change anything.
 """
 import time
+from typing import Optional
 import pyperf
 from spinedb_api import DatabaseMapping, to_database
-from benchmarks.utils import build_even_map, run_file_name
+from benchmarks.utils import build_even_map
 
 
-def update_default_value(loops, db_map, value, value_type):
+def update_default_value(loops: int, db_map: DatabaseMapping, value: bytes, value_type: Optional[str]) -> float:
     total_time = 0.0
     for counter in range(loops):
         start = time.perf_counter()
@@ -23,7 +24,7 @@ def update_default_value(loops, db_map, value, value_type):
     return total_time
 
 
-def run_benchmark():
+def run_benchmark(file_name: str):
    value, value_type = to_database(build_even_map())
    with DatabaseMapping("sqlite://", create=True) as db_map:
        db_map.add_entity_class_item(name="Object")
@@ -34,8 +35,9 @@ def run_benchmark():
    benchmark = runner.bench_time_func(
        "update_parameter_definition_item[Map,Map]", update_default_value, db_map, value, value_type
    )
-    pyperf.add_runs(run_file_name(), benchmark)
+    if file_name:
+        pyperf.add_runs(file_name, benchmark)
 
 
 if __name__ == "__main__":
-    run_benchmark()
+    run_benchmark("")
6 changes: 1 addition & 5 deletions benchmarks/utils.py
@@ -1,7 +1,7 @@
 import datetime
 import math
 from typing import Sequence
-from spinedb_api import __version__, DateTime, Map
+from spinedb_api import DateTime, Map
 
 
 def build_map(size: int) -> Map:
@@ -27,7 +27,3 @@ def build_even_map(shape: Sequence[int] = (10, 10, 10)) -> Map:
        xs.append(DateTime(start + datetime.timedelta(hours=i)))
        ys.append(build_even_map(shape[1:]))
    return Map(xs, ys)
-
-
-def run_file_name() -> str:
-    return f"benchmark-{__version__}.json"
