Fix test_priors_to_measurements
Fixes an issue in `test_priors_to_measurements` that led to evaluating the prior at the wrong parameter values
(using the prior's location parameter instead of the actually estimated parameter values).
The problem was in the test code, not in the tested code.
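
To illustrate the bug class, here is a minimal sketch with made-up numbers (not code from this repository): for a parameter-scale normal prior, the prior contribution must be evaluated at the parameter value that is actually estimated. Evaluating `norm.logpdf` at the prior's own location parameter always returns the density at the mode, so the comparison can pass for the wrong reason.

from scipy.stats import norm

# Hypothetical prior: location 0.5, scale 1.0; the nominal (estimated)
# parameter value on the prior's scale is 2.0. All numbers are made up.
loc, scale = 0.5, 1.0
x_nominal_scaled = 2.0

# Buggy: evaluates the prior at its own location parameter,
# i.e. always at the mode of the distribution.
wrong = norm.logpdf(loc, loc=loc, scale=scale)               # -0.9189...

# Fixed: evaluates the prior at the actually estimated value.
right = norm.logpdf(x_nominal_scaled, loc=loc, scale=scale)  # -2.0439...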
dweindl committed Dec 9, 2024
1 parent 9baf981 commit 749168c
Showing 1 changed file with 50 additions and 21 deletions.
tests/v1/test_priors.py
@@ -8,7 +8,15 @@
from scipy.stats import norm

import petab.v1
from petab.v1 import get_simulation_conditions
from petab.v1 import (
ESTIMATE,
MEASUREMENT,
OBJECTIVE_PRIOR_TYPE,
OBSERVABLE_ID,
SIMULATION,
get_simulation_conditions,
get_simulation_df,
)
from petab.v1.priors import priors_to_measurements


@@ -17,20 +25,29 @@
)
def test_priors_to_measurements(problem_id):
"""Test the conversion of priors to measurements."""
# setup
petab_problem_priors: petab.v1.Problem = (
benchmark_models_petab.get_problem(problem_id)
)
petab_problem_priors.visualization_df = None
assert petab.v1.lint_problem(petab_problem_priors) is False

if problem_id == "Isensee_JCB2018":
# required to match the stored simulation results below
petab.v1.flatten_timepoint_specific_output_overrides(
petab_problem_priors
)
assert petab.v1.lint_problem(petab_problem_priors) is False

original_problem = deepcopy(petab_problem_priors)
x_scaled_dict = dict(
zip(
original_problem.x_free_ids,
original_problem.x_nominal_free_scaled,
strict=True,
)
)

# convert priors to measurements
petab_problem_measurements = priors_to_measurements(petab_problem_priors)

# check that the original problem is not modified
@@ -45,6 +62,7 @@ def test_priors_to_measurements(problem_id):
getattr(original_problem, attr)
)
).empty, diff

# check that measurements and observables were added
assert petab.v1.lint_problem(petab_problem_measurements) is False
assert (
@@ -59,6 +77,7 @@ def test_priors_to_measurements(problem_id):
petab_problem_measurements.measurement_df.shape[0]
> petab_problem_priors.measurement_df.shape[0]
)

# ensure we didn't introduce any new conditions
assert len(
get_simulation_conditions(petab_problem_measurements.measurement_df)
@@ -67,26 +86,40 @@ def test_priors_to_measurements(problem_id):
# verify that the objective function value is the same

# load/construct the simulation results
simulation_df_priors = petab.v1.get_simulation_df(
simulation_df_priors = get_simulation_df(
Path(
benchmark_models_petab.MODELS_DIR,
problem_id,
f"simulatedData_{problem_id}.tsv",
)
)
simulation_df_measurements = pd.concat(
[
petab_problem_measurements.measurement_df.rename(
columns={petab.v1.MEASUREMENT: petab.v1.SIMULATION}
)[
petab_problem_measurements.measurement_df[
petab.v1.C.OBSERVABLE_ID
].str.startswith("prior_")
],
simulation_df_priors,
# for the prior observables, we need to "simulate" the model with the
# nominal parameter values
simulated_prior_observables = (
petab_problem_measurements.measurement_df.rename(
columns={MEASUREMENT: SIMULATION}
)[
petab_problem_measurements.measurement_df[
OBSERVABLE_ID
].str.startswith("prior_")
]
)

def apply_parameter_values(row):
# apply the parameter values to the observable formula for the prior
if row[OBSERVABLE_ID].startswith("prior_"):
row[SIMULATION] = x_scaled_dict[
row[OBSERVABLE_ID].removeprefix("prior_")
]
return row

simulated_prior_observables = simulated_prior_observables.apply(
apply_parameter_values, axis=1
)
simulation_df_measurements = pd.concat(
[simulation_df_priors, simulated_prior_observables]
)

llh_priors = petab.v1.calculate_llh_for_table(
petab_problem_priors.measurement_df,
simulation_df_priors,
@@ -102,10 +135,8 @@ def test_priors_to_measurements(problem_id):

# get prior objective function contribution
parameter_ids = petab_problem_priors.parameter_df.index.values[
(petab_problem_priors.parameter_df[petab.v1.ESTIMATE] == 1)
& petab_problem_priors.parameter_df[
petab.v1.OBJECTIVE_PRIOR_TYPE
].notna()
(petab_problem_priors.parameter_df[ESTIMATE] == 1)
& petab_problem_priors.parameter_df[OBJECTIVE_PRIOR_TYPE].notna()
]
priors = petab.v1.get_priors_from_df(
petab_problem_priors.parameter_df,
@@ -117,9 +148,7 @@ def test_priors_to_measurements(problem_id):
prior_type, prior_pars, par_scale, par_bounds = prior
if prior_type == petab.v1.PARAMETER_SCALE_NORMAL:
prior_contrib += norm.logpdf(
petab_problem_priors.x_nominal_free_scaled[
petab_problem_priors.x_free_ids.index(parameter_id)
],
x_scaled_dict[parameter_id],
loc=prior_pars[0],
scale=prior_pars[1],
)
@@ -134,4 +163,4 @@ def test_priors_to_measurements(problem_id):
llh_priors + prior_contrib, llh_measurements, rtol=1e-3, atol=1e-16
), (llh_priors + prior_contrib, llh_measurements)
# check that the tolerance is not too high
assert np.abs(prior_contrib) > 1e-3 * np.abs(llh_priors)
assert np.abs(prior_contrib) > 1e-8 * np.abs(llh_priors)
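
For context on the equality the test asserts: `priors_to_measurements` turns each parameter prior into a pseudo-measurement, so the prior's objective contribution reappears as an ordinary likelihood term and `llh_priors + prior_contrib` should match `llh_measurements`. A minimal sketch of why this works for a parameter-scale normal prior, with hypothetical numbers and assuming the pseudo-observable maps directly to the parameter value:

import numpy as np
from scipy.stats import norm

mu, sigma = 0.5, 1.0  # prior location and scale (made-up values)
x = 2.0               # estimated parameter value on the prior's scale

# Direct prior contribution to the objective function.
prior_contrib = norm.logpdf(x, loc=mu, scale=sigma)

# After conversion: the prior location mu becomes the "measurement",
# the parameter value x becomes the "simulation", sigma the noise.
# The normal density is symmetric in (x - mu), so both terms agree.
measurement_llh = norm.logpdf(mu, loc=x, scale=sigma)

assert np.isclose(prior_contrib, measurement_llh)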
