Fix test_priors_to_measurements #336

Merged (3 commits, Dec 9, 2024)

Changes from all commits
73 changes: 52 additions & 21 deletions tests/v1/test_priors.py
@@ -8,7 +8,15 @@
from scipy.stats import norm

import petab.v1
from petab.v1 import get_simulation_conditions
from petab.v1 import (
ESTIMATE,
MEASUREMENT,
OBJECTIVE_PRIOR_TYPE,
OBSERVABLE_ID,
SIMULATION,
get_simulation_conditions,
get_simulation_df,
)
from petab.v1.priors import priors_to_measurements


@@ -17,20 +25,31 @@
)
def test_priors_to_measurements(problem_id):
"""Test the conversion of priors to measurements."""
# setup
petab_problem_priors: petab.v1.Problem = (
benchmark_models_petab.get_problem(problem_id)
)
petab_problem_priors.visualization_df = None
assert petab.v1.lint_problem(petab_problem_priors) is False

if problem_id == "Isensee_JCB2018":
# required to match the stored simulation results below
petab.v1.flatten_timepoint_specific_output_overrides(
petab_problem_priors
)
assert petab.v1.lint_problem(petab_problem_priors) is False

original_problem = deepcopy(petab_problem_priors)
# All priors in this test case are defined on parameter scale, hence
# the dummy measurements will take the scaled nominal values.
x_scaled_dict = dict(
zip(
original_problem.x_free_ids,
original_problem.x_nominal_free_scaled,
strict=True,
)
)
Comment on lines +44 to +50

Member:

Should it really be scaled values here? I assume the prior is either (1) unscaled or (2) on parameterScale. The former means this should be unscaled, and the latter is already handled in the observable formulae for these dummy prior observables, so no scaling is required here?

Member Author:

So far, this test only handles PARAMETER_SCALE_NORMAL (to be extended in #329). Yes, the scaling is handled in the observable formulae, but here we need to write the simulation table. For that, we do not evaluate the observable formula using the given parameters, but write the result directly; therefore, we need the scaled values.
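
To make the point above concrete: a minimal, self-contained sketch (not part of the diff; the nominal value, the prior parameters, and the assumed mapping of the converted dummy measurement to the prior mean and standard deviation are all hypothetical) of why the simulation table needs the scaled nominal value for a PARAMETER_SCALE_NORMAL prior:

```python
import numpy as np
from scipy.stats import norm

# hypothetical parameter on log10 scale with a PARAMETER_SCALE_NORMAL prior
nominal_value = 0.01                      # unscaled nominal value
scaled_nominal = np.log10(nominal_value)  # what x_nominal_free_scaled holds: -2.0
prior_mean, prior_std = -2.5, 1.0         # prior parameters, on parameter scale

# the prior contribution is evaluated on the parameter scale ...
prior_contrib = norm.logpdf(scaled_nominal, loc=prior_mean, scale=prior_std)

# ... and the dummy prior observable returns the scaled parameter, so the
# value written to the simulation table must be the scaled nominal; assuming
# the converted measurement carries the prior mean with the prior std as
# noise, the measurement likelihood reproduces the prior term exactly
# (the normal density is symmetric in its argument and location)
llh_contrib = norm.logpdf(prior_mean, loc=scaled_nominal, scale=prior_std)
assert np.isclose(prior_contrib, llh_contrib)
```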


# convert priors to measurements
petab_problem_measurements = priors_to_measurements(petab_problem_priors)

# check that the original problem is not modified
@@ -45,6 +64,7 @@ def test_priors_to_measurements(problem_id):
getattr(original_problem, attr)
)
).empty, diff

# check that measurements and observables were added
assert petab.v1.lint_problem(petab_problem_measurements) is False
assert (
@@ -59,6 +79,7 @@ def test_priors_to_measurements(problem_id):
petab_problem_measurements.measurement_df.shape[0]
> petab_problem_priors.measurement_df.shape[0]
)

# ensure we didn't introduce any new conditions
assert len(
get_simulation_conditions(petab_problem_measurements.measurement_df)
@@ -67,26 +88,40 @@ def test_priors_to_measurements(problem_id):
# verify that the objective function value is the same

# load/construct the simulation results
simulation_df_priors = petab.v1.get_simulation_df(
simulation_df_priors = get_simulation_df(
Path(
benchmark_models_petab.MODELS_DIR,
problem_id,
f"simulatedData_{problem_id}.tsv",
)
)
simulation_df_measurements = pd.concat(
[
petab_problem_measurements.measurement_df.rename(
columns={petab.v1.MEASUREMENT: petab.v1.SIMULATION}
)[
petab_problem_measurements.measurement_df[
petab.v1.C.OBSERVABLE_ID
].str.startswith("prior_")
],
simulation_df_priors,
# for the prior observables, we need to "simulate" the model with the
# nominal parameter values
simulated_prior_observables = (
petab_problem_measurements.measurement_df.rename(
columns={MEASUREMENT: SIMULATION}
)[
petab_problem_measurements.measurement_df[
OBSERVABLE_ID
].str.startswith("prior_")
]
)

def apply_parameter_values(row):
# apply the parameter values to the observable formula for the prior
if row[OBSERVABLE_ID].startswith("prior_"):
row[SIMULATION] = x_scaled_dict[
row[OBSERVABLE_ID].removeprefix("prior_")
]
return row

simulated_prior_observables = simulated_prior_observables.apply(
apply_parameter_values, axis=1
)
simulation_df_measurements = pd.concat(
[simulation_df_priors, simulated_prior_observables]
)

llh_priors = petab.v1.calculate_llh_for_table(
petab_problem_priors.measurement_df,
simulation_df_priors,
@@ -102,10 +137,8 @@ def test_priors_to_measurements(problem_id):

# get prior objective function contribution
parameter_ids = petab_problem_priors.parameter_df.index.values[
(petab_problem_priors.parameter_df[petab.v1.ESTIMATE] == 1)
& petab_problem_priors.parameter_df[
petab.v1.OBJECTIVE_PRIOR_TYPE
].notna()
(petab_problem_priors.parameter_df[ESTIMATE] == 1)
& petab_problem_priors.parameter_df[OBJECTIVE_PRIOR_TYPE].notna()
]
priors = petab.v1.get_priors_from_df(
petab_problem_priors.parameter_df,
@@ -117,9 +150,7 @@ def test_priors_to_measurements(problem_id):
prior_type, prior_pars, par_scale, par_bounds = prior
if prior_type == petab.v1.PARAMETER_SCALE_NORMAL:
prior_contrib += norm.logpdf(
petab_problem_priors.x_nominal_free_scaled[
petab_problem_priors.x_free_ids.index(parameter_id)
],
x_scaled_dict[parameter_id],
loc=prior_pars[0],
scale=prior_pars[1],
)
@@ -134,4 +165,4 @@ def test_priors_to_measurements(problem_id):
llh_priors + prior_contrib, llh_measurements, rtol=1e-3, atol=1e-16
), (llh_priors + prior_contrib, llh_measurements)
# check that the tolerance is not too high
assert np.abs(prior_contrib) > 1e-3 * np.abs(llh_priors)
assert np.abs(prior_contrib) > 1e-8 * np.abs(llh_priors)
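
A final illustrative note (hypothetical numbers, not from the PR): the guard in the last line exists because the rtol-based np.isclose comparison alone would also pass if the prior contribution were dropped entirely, whenever that contribution is small relative to the total log-likelihood. A minimal sketch:

```python
import numpy as np

llh = -1234.5                          # hypothetical log-likelihood without priors
negligible_contrib = 1e-9 * abs(llh)   # contribution below rtol * |llh|

# the main equality check cannot detect a dropped prior term of this size ...
assert np.isclose(llh + negligible_contrib, llh, rtol=1e-3, atol=1e-16)

# ... hence the extra sanity check that the contribution exceeds a noise
# floor (1e-8 * |llh_priors| after this PR); it fails for this tiny value
assert not abs(negligible_contrib) > 1e-8 * abs(llh)
```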