fix bug on raising warnings
AldoGl committed Nov 21, 2022
1 parent 2cba7c8 commit 034937d
Showing 3 changed files with 22 additions and 13 deletions.
6 changes: 4 additions & 2 deletions black_it/calibrator.py
@@ -21,6 +21,7 @@
 import os
 import textwrap
 import time
+import warnings
 from typing import Callable, Dict, List, Optional, Tuple, Union
 
 import numpy as np
@@ -95,10 +96,11 @@ def __init__( # pylint: disable=too-many-arguments
             self.N = self.real_data.shape[0]
         else:
             if sim_length != self.real_data.shape[0]:
-                raise RuntimeWarning(
+                warnings.warn(
                     "The length of real time series is different from the simulation length, "
                     f"got {self.real_data.shape[0]} and {sim_length}. This may or may not be a problem depending "
-                    "on the loss function used."
+                    "on the loss function used.",
+                    RuntimeWarning,
                 )
             self.N = sim_length
         self.D = self.real_data.shape[1]
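Note on the change above: RuntimeWarning is an exception class, so `raise RuntimeWarning(...)` aborts the caller instead of warning it, while `warnings.warn(msg, RuntimeWarning)` emits a non-fatal warning and lets execution continue. A minimal standalone sketch of the difference (the `check_length` helper is hypothetical, not code from the repository):

import warnings


def check_length(real_len: int, sim_length: int) -> None:
    """Hypothetical helper mirroring the calibrator check above."""
    if sim_length != real_len:
        # Before the fix, `raise RuntimeWarning(...)` would propagate an exception
        # and abort the caller; `warnings.warn` only records/prints the warning.
        warnings.warn(
            f"real series length {real_len} differs from simulation length {sim_length}",
            RuntimeWarning,
        )


check_length(100, 90)         # emits a RuntimeWarning
print("execution continues")  # still reached, unlike with `raise`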
6 changes: 4 additions & 2 deletions black_it/samplers/gaussian_process.py
@@ -16,6 +16,7 @@
 
 """This module contains the implementation of the Gaussian process-based sampling."""
 import random
+import warnings
 from enum import Enum
 from typing import Optional, Tuple, cast
 
@@ -164,8 +165,9 @@ def sample_batch(
         X, Y = existing_points, np.atleast_2d(existing_losses).T
 
         if X.shape[0] > 500:
-            raise RuntimeWarning(
-                "Standard GP evaluations can be expensive for large datasets, consider implementing a sparse GP"
+            warnings.warn(
+                "Standard GP evaluations can be expensive for large datasets, consider implementing a sparse GP",
+                RuntimeWarning,
            )
 
         # initialize GP class from GPy with a Matern kernel by default
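Because the sampler now emits this message through the standard `warnings` machinery rather than raising it, callers can filter or escalate it themselves. A small standard-library usage sketch (the warning text is shortened; this is not code from the repository):

import warnings

# Silence the large-dataset warning locally, e.g. around a call such as
# sampler.sample_batch(...) with more than 500 existing points.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", category=RuntimeWarning)
    warnings.warn("Standard GP evaluations can be expensive ...", RuntimeWarning)

# Or escalate it to an exception while debugging:
with warnings.catch_warnings():
    warnings.simplefilter("error", category=RuntimeWarning)
    try:
        warnings.warn("Standard GP evaluations can be expensive ...", RuntimeWarning)
    except RuntimeWarning:
        print("warning escalated to an error")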
23 changes: 14 additions & 9 deletions tests/test_samplers/test_gaussian_process.py
@@ -32,10 +32,12 @@ def setup(self) -> None:
         self.xys, self.losses = self._construct_fake_grid()
 
     @classmethod
-    def _construct_fake_grid(cls) -> Tuple[NDArray[np.float64], NDArray[np.float64]]:
+    def _construct_fake_grid(
+        cls, n: int = 6
+    ) -> Tuple[NDArray[np.float64], NDArray[np.float64]]:
         """Construct a fake grid of evaluated losses."""
-        xs = np.linspace(0, 1, 6)
-        ys = np.linspace(0, 1, 6)
+        xs = np.linspace(0, 1, n)
+        ys = np.linspace(0, 1, n)
         xys_list = []
         losses_list = []
 
@@ -106,17 +108,20 @@ def test_gaussian_process_sample_warning_too_large_dataset() -> None:
         parameters_precision=np.array([0.01, 0.01]),
         verbose=False,
     )
-    xs = np.linspace(0, 1, 1000)
-    ys = np.linspace(0, 1, 3)
-    existing_points = xs.reshape((-1, 1)).dot(ys.reshape((1, -1)))
-    losses = np.zeros(existing_points.shape)
-    with pytest.raises(
+    # very high number of samples
+    (
+        xys,
+        losses,
+    ) = TestGaussianProcess2D._construct_fake_grid(  # pylint: disable=protected-access
+        n=23
+    )
+    with pytest.warns(
         RuntimeWarning,
         match="Standard GP evaluations can be expensive "
         "for large datasets, consider implementing "
         "a sparse GP",
     ):
-        sampler.sample(param_grid, existing_points, losses)
+        sampler.sample(param_grid, xys, losses)
 
 
 def test_gaussian_process_sample_wrong_acquisition() -> None:
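The test switches from `pytest.raises` to `pytest.warns`, which records warnings emitted inside its block and fails if none matches; the `match` argument is applied with `re.search` against the warning message. A minimal standalone sketch of the same pattern (the `emit` function is a hypothetical stand-in for `sampler.sample` on a large dataset):

import warnings

import pytest


def emit() -> None:
    """Hypothetical stand-in for sampler.sample with > 500 existing points."""
    warnings.warn(
        "Standard GP evaluations can be expensive for large datasets, "
        "consider implementing a sparse GP",
        RuntimeWarning,
    )


def test_emit_warns() -> None:
    # Fails if no RuntimeWarning whose message matches the regex is emitted.
    with pytest.warns(RuntimeWarning, match="consider implementing a sparse GP"):
        emit()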
