Tuto-GUDHI-representations.ipynb is failing with gudhi 3.7.0 #65

Closed
VincentRouvreau opened this issue Jan 3, 2023 · 3 comments · Fixed by GUDHI/gudhi-devel#782

Comments

@VincentRouvreau
Contributor

Since gudhi 3.7.0, Tuto-GUDHI-representations.ipynb fails at the `model = model.fit(train_dgms, train_labs)` cell.

This seems to come from GUDHI/gudhi-devel#719.

```
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[22], line 1
----> 1 model = model.fit(train_dgms, train_labs)

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/model_selection/_search.py:875, in BaseSearchCV.fit(self, X, y, groups, **fit_params)
    869     results = self._format_results(
    870         all_candidate_params, n_splits, all_out, all_more_results
    871     )
    873     return results
--> 875 self._run_search(evaluate_candidates)
    877 # multimetric is determined here because in the case of a callable
    878 # self.scoring the return type is only known after calling
    879 first_test_score = all_out[0]["test_scores"]

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/model_selection/_search.py:1389, in GridSearchCV._run_search(self, evaluate_candidates)
   1387 def _run_search(self, evaluate_candidates):
   1388     """Search all candidates in param_grid"""
-> 1389     evaluate_candidates(ParameterGrid(self.param_grid))

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/model_selection/_search.py:822, in BaseSearchCV.fit.<locals>.evaluate_candidates(candidate_params, cv, more_results)
    814 if self.verbose > 0:
    815     print(
    816         "Fitting {0} folds for each of {1} candidates,"
    817         " totalling {2} fits".format(
    818             n_splits, n_candidates, n_candidates * n_splits
    819         )
    820     )
--> 822 out = parallel(
    823     delayed(_fit_and_score)(
    824         clone(base_estimator),
    825         X,
    826         y,
    827         train=train,
    828         test=test,
    829         parameters=parameters,
    830         split_progress=(split_idx, n_splits),
    831         candidate_progress=(cand_idx, n_candidates),
    832         **fit_and_score_kwargs,
    833     )
    834     for (cand_idx, parameters), (split_idx, (train, test)) in product(
    835         enumerate(candidate_params), enumerate(cv.split(X, y, groups))
    836     )
    837 )
    839 if len(out) < 1:
    840     raise ValueError(
    841         "No fits were performed. "
    842         "Was the CV iterator empty? "
    843         "Were there no candidates?"
    844     )

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/parallel.py:1088, in Parallel.__call__(self, iterable)
   1085 if self.dispatch_one_batch(iterator):
   1086     self._iterating = self._original_iterator is not None
-> 1088 while self.dispatch_one_batch(iterator):
   1089     pass
   1091 if pre_dispatch == "all" or n_jobs == 1:
   1092     # The iterable was consumed all at once by the above for loop.
   1093     # No need to wait for async callbacks to trigger to
   1094     # consumption.

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/parallel.py:901, in Parallel.dispatch_one_batch(self, iterator)
    899     return False
    900 else:
--> 901     self._dispatch(tasks)
    902     return True

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/parallel.py:819, in Parallel._dispatch(self, batch)
    817 with self._lock:
    818     job_idx = len(self._jobs)
--> 819     job = self._backend.apply_async(batch, callback=cb)
    820     # A job can complete so quickly than its callback is
    821     # called before we get here, causing self._jobs to
    822     # grow. To ensure correct results ordering, .insert is
    823     # used (rather than .append) in the following line
    824     self._jobs.insert(job_idx, job)

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/_parallel_backends.py:208, in SequentialBackend.apply_async(self, func, callback)
    206 def apply_async(self, func, callback=None):
    207     """Schedule a func to be run"""
--> 208     result = ImmediateResult(func)
    209     if callback:
    210         callback(result)

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/_parallel_backends.py:597, in ImmediateResult.__init__(self, batch)
    594 def __init__(self, batch):
    595     # Don't delay the application, to avoid keeping the input
    596     # arguments in memory
--> 597     self.results = batch()

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/parallel.py:288, in BatchedCalls.__call__(self)
    284 def __call__(self):
    285     # Set the default nested backend to self._backend but do not set the
    286     # change the default number of processes to -1
    287     with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 288         return [func(*args, **kwargs)
    289                 for func, args, kwargs in self.items]

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/joblib/parallel.py:288, in <listcomp>(.0)
    284 def __call__(self):
    285     # Set the default nested backend to self._backend but do not set the
    286     # change the default number of processes to -1
    287     with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 288         return [func(*args, **kwargs)
    289                 for func, args, kwargs in self.items]

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/utils/fixes.py:117, in _FuncWrapper.__call__(self, *args, **kwargs)
    115 def __call__(self, *args, **kwargs):
    116     with config_context(**self.config):
--> 117         return self.function(*args, **kwargs)

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/model_selection/_validation.py:672, in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, split_progress, candidate_progress, error_score)
    670     cloned_parameters = {}
    671     for k, v in parameters.items():
--> 672         cloned_parameters[k] = clone(v, safe=False)
    674     estimator = estimator.set_params(**cloned_parameters)
    676 start_time = time.time()

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/base.py:87, in clone(estimator, safe)
     79             raise TypeError(
     80                 "Cannot clone object '%s' (type %s): "
     81                 "it does not seem to be a scikit-learn "
     82                 "estimator as it does not implement a "
     83                 "'get_params' method." % (repr(estimator), type(estimator))
     84             )
     86 klass = estimator.__class__
---> 87 new_object_params = estimator.get_params(deep=False)
     88 for name, param in new_object_params.items():
     89     new_object_params[name] = clone(param, safe=False)

File ~/miniconda3/envs/test_notebooks/lib/python3.8/site-packages/sklearn/base.py:170, in BaseEstimator.get_params(self, deep)
    168 out = dict()
    169 for key in self._get_param_names():
--> 170     value = getattr(self, key)
    171     if deep and hasattr(value, "get_params") and not isinstance(value, type):
    172         deep_items = value.get_params().items()

AttributeError: 'Landscape' object has no attribute 'sample_range'
```
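
For reference, `GridSearchCV` is not actually needed to trigger this: sklearn's `clone()` calls `get_params()` on the unfitted estimator, and that call alone fails. A minimal reproducer (a sketch, assuming gudhi 3.7.0):

```python
# Minimal reproducer, assuming gudhi 3.7.0: sklearn's get_params() reads
# every constructor parameter via getattr(), but sample_range only gets
# set during fit(), so an unfitted Landscape raises AttributeError.
from gudhi.representations import Landscape

Landscape().get_params()
# AttributeError: 'Landscape' object has no attribute 'sample_range'
```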
@mglisse
Member

mglisse commented Jan 3, 2023

That happens when `get_params` is called before `fit`: `Landscape().get_params()`, which can happen naturally (sklearn's `clone()` does exactly that, as the traceback shows). Adding `self.sample_range = self.sample_range_init` in `Landscape.__init__` would likely make the error go away, but it may be a sign that what I did in GUDHI/gudhi-devel#719 does not match sklearn's expectations 😞
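
To illustrate the sklearn convention at play, here is a rough, hypothetical sketch of the situation and of the suggested one-line fix (the real gudhi code differs; names are inferred from the traceback and the comment above):

```python
import numpy as np

# Hypothetical, simplified sketch; not gudhi's actual source.
# sklearn's BaseEstimator.get_params() calls getattr(self, name) for every
# parameter name in __init__'s signature, so 'sample_range' must exist as
# an attribute as soon as the object is constructed.
class Landscape:
    def __init__(self, num_landscapes=5, resolution=100,
                 sample_range=(np.nan, np.nan)):
        self.num_landscapes = num_landscapes
        self.resolution = resolution
        self.sample_range_init = sample_range        # what fit() will read
        self.sample_range = self.sample_range_init   # the suggested fix

    def fit(self, X, y=None):
        # fit() can still compute an effective range from the data;
        # get_params() no longer depends on fit() having been called.
        return self
```

Note that sklearn's documented convention is to store each constructor argument unchanged under its own name and to keep values computed during fitting in separate attributes (conventionally with a trailing underscore), which is likely the mismatch being alluded to.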

@VincentRouvreau
Contributor Author

To be tested with gudhi > 3.7.0.

VincentRouvreau added a commit to VincentRouvreau/TDA-tutorial that referenced this issue Jan 5, 2023
@VincentRouvreau
Contributor Author

Tested and OK with gudhi 3.8.0.
