Evaluate and plot symbolic expressions based on simulation results #5897

name: Benchmark Collection
on:
  push:
    branches:
      - develop
      - master
  pull_request:
    branches:
      - master
      - develop
  merge_group:
  workflow_dispatch:
  schedule:
    - cron: '48 4 * * *'
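    # i.e. once per day at 04:48 UTC (GitHub Actions cron schedules use UTC)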
jobs:
  build:
    name: Benchmark Collection
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        python-version: [ "3.9" ]
        extract_subexpressions: ["true", "false"]
    env:
      AMICI_EXTRACT_CSE: ${{ matrix.extract_subexpressions }}
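      # the matrix dimension toggles common-subexpression extraction during
      # model code generation via the AMICI_EXTRACT_CSE environment variable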
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - uses: actions/checkout@v3
        with:
          fetch-depth: 20
      # install dependencies
      - name: apt
        run: |
          sudo apt-get update \
            && sudo apt-get install -y swig libatlas-base-dev
      - run: echo "${HOME}/.local/bin/" >> $GITHUB_PATH
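      # ~/.local/bin is where `pip3 install --user` places console scripts;
      # adding it to $GITHUB_PATH makes them available to subsequent steps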
      # install AMICI
      - name: Create AMICI sdist
        run: |
          pip3 install build && cd python/sdist && python3 -m build --sdist
      - name: Install AMICI sdist
        run: |
          pip3 install --user petab[vis] && \
            AMICI_PARALLEL_COMPILE=2 pip3 install -v --user \
              $(ls -t python/sdist/dist/amici-*.tar.gz | head -1)[petab,test,vis]
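      # `ls -t ... | head -1` picks the most recently built sdist;
      # AMICI_PARALLEL_COMPILE=2 allows two parallel compile jobs when
      # building the extension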
      # retrieve and test the benchmark models
      - name: Download and test benchmark collection
        run: |
          git clone --depth 1 https://github.com/benchmarking-initiative/Benchmark-Models-PEtab.git \
            && export BENCHMARK_COLLECTION="$(pwd)/Benchmark-Models-PEtab/Benchmark-Models/" \
            && AMICI_PARALLEL_COMPILE=2 tests/benchmark-models/test_benchmark_collection.sh
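      # (test_benchmark_collection.sh presumably also writes the
      #  computation_times.csv/.png files that are uploaded as artifacts below)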
      # run gradient checks
      - name: Run Gradient Checks
        run: |
          pip install git+https://github.com/ICB-DCM/fiddy.git \
            && cd tests/benchmark-models && pytest ./test_petab_benchmark.py
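      # fiddy provides finite-difference utilities used by
      # test_petab_benchmark.py for the gradient checks; it is installed
      # from its GitHub default branch, as no release is pinned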
# upload results
- uses: actions/upload-artifact@v3
with:
name: computation times
path: |
tests/benchmark-models/computation_times.csv
tests/benchmark-models/computation_times.png