From 75322610cec95b41f1a9a019effe3ac0fb64d65c Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Fri, 15 Nov 2024 08:20:21 -0800
Subject: [PATCH 1/4] Remove FixedNoiseDataset (#2626)

Summary:
Pull Request resolved: https://github.com/pytorch/botorch/pull/2626

`FixedNoiseDataset` was deprecated and marked for removal in v0.11.

Reviewed By: esantorella

Differential Revision: D66003588

fbshipit-source-id: f3c3503d02c0fa0a9596086b336482bb95491f6b
---
 botorch/utils/datasets.py   | 34 ----------------------------------
 test/utils/test_datasets.py | 14 +-------------
 2 files changed, 1 insertion(+), 47 deletions(-)

diff --git a/botorch/utils/datasets.py b/botorch/utils/datasets.py
index f11f5c80e7..1007be8c0a 100644
--- a/botorch/utils/datasets.py
+++ b/botorch/utils/datasets.py
@@ -8,7 +8,6 @@
 
 from __future__ import annotations
 
-import warnings
 from typing import Any
 
 import torch
@@ -149,39 +148,6 @@ def __eq__(self, other: Any) -> bool:
         )
 
 
-class FixedNoiseDataset(SupervisedDataset):
-    r"""A SupervisedDataset with an additional field `Yvar` that stipulates
-    observations variances so that `Y[i] ~ N(f(X[i]), Yvar[i])`.
-
-    NOTE: This is deprecated. Use `SupervisedDataset` instead.
-    Will be removed in a future release (~v0.11).
-    """
-
-    def __init__(
-        self,
-        X: BotorchContainer | Tensor,
-        Y: BotorchContainer | Tensor,
-        Yvar: BotorchContainer | Tensor,
-        feature_names: list[str],
-        outcome_names: list[str],
-        validate_init: bool = True,
-    ) -> None:
-        r"""Initialize a `FixedNoiseDataset` -- deprecated!"""
-        warnings.warn(
-            "`FixedNoiseDataset` is deprecated. Use `SupervisedDataset` instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-        super().__init__(
-            X=X,
-            Y=Y,
-            feature_names=feature_names,
-            outcome_names=outcome_names,
-            Yvar=Yvar,
-            validate_init=validate_init,
-        )
-
-
 class RankingDataset(SupervisedDataset):
     r"""A SupervisedDataset whose labelled pairs `(x, y)` consist of m-ary combinations
     `x ∈ Z^{m}` of elements from a ground set `Z = (z_1, ...)` and ranking vectors
diff --git a/test/utils/test_datasets.py b/test/utils/test_datasets.py
index 22d8c24a50..de741c1a60 100644
--- a/test/utils/test_datasets.py
+++ b/test/utils/test_datasets.py
@@ -10,7 +10,6 @@
 from botorch.utils.containers import DenseContainer, SliceContainer
 from botorch.utils.datasets import (
     ContextualDataset,
-    FixedNoiseDataset,
     MultiTaskDataset,
     RankingDataset,
     SupervisedDataset,
@@ -129,7 +128,7 @@ def test_fixedNoise(self):
         Yvar = rand(3, 1)
         feature_names = ["x1", "x2"]
         outcome_names = ["y"]
-        dataset = FixedNoiseDataset(
+        dataset = SupervisedDataset(
             X=X,
             Y=Y,
             Yvar=Yvar,
@@ -142,17 +141,6 @@ def test_fixedNoise(self):
         self.assertEqual(dataset.feature_names, feature_names)
         self.assertEqual(dataset.outcome_names, outcome_names)
 
-        with self.assertRaisesRegex(
-            ValueError, "`Y` and `Yvar`"
-        ), self.assertWarnsRegex(DeprecationWarning, "SupervisedDataset"):
-            FixedNoiseDataset(
-                X=X,
-                Y=Y,
-                Yvar=Yvar.squeeze(),
-                feature_names=feature_names,
-                outcome_names=outcome_names,
-            )
-
     def test_ranking(self):
         # Test `_validate`
         X_val = rand(16, 2)

From 260f95733d362a206ac35684bd9f39d2afb18057 Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Fri, 15 Nov 2024 08:20:21 -0800
Subject: [PATCH 2/4] Clean up support for legacy format non-linear constraints (#2627)

Summary:
Pull Request resolved: https://github.com/pytorch/botorch/pull/2627

The legacy non-linear inequality constraint format has been deprecated since
https://github.com/pytorch/botorch/pull/1793.
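The supported format pairs each constraint callable with a boolean marking it
as an intra-point constraint. A minimal sketch of the tuple form follows; the
toy model, acquisition function, and constraint are invented for illustration
and are not part of this diff:

    import torch
    from botorch.acquisition import LogExpectedImprovement
    from botorch.models import SingleTaskGP
    from botorch.optim import optimize_acqf

    # Toy model and acquisition function, invented for illustration.
    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
    acqf = LogExpectedImprovement(model=model, best_f=train_Y.max())

    def nlc(x):
        # Feasibility is nlc(x) >= 0; here: require x[..., 0] + x[..., 1] <= 1.
        return 1.0 - x[..., 0] - x[..., 1]

    candidates, acq_value = optimize_acqf(
        acq_function=acqf,
        bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double),
        q=1,
        num_restarts=2,
        # Explicit feasible starting points are required with non-linear
        # constraints; these lie in [0, 0.5)^2, so nlc(x) > 0 holds.
        batch_initial_conditions=torch.rand(2, 1, 2, dtype=torch.double) * 0.5,
        # Tuple format: (callable, is_intrapoint). Bare callables were
        # previously converted with a DeprecationWarning; this patch removes
        # that conversion.
        nonlinear_inequality_constraints=[(nlc, True)],
    )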
Reviewed By: esantorella

Differential Revision: D66003879

fbshipit-source-id: a1ddfa26182f74b80c0c8aa17c801ad74b81da04
---
 botorch/generation/gen.py     | 15 +--------------
 botorch/generation/utils.py   | 30 ------------------------------
 test/generation/test_utils.py | 22 ----------------------
 test/optim/test_optimize.py   | 14 --------------
 4 files changed, 1 insertion(+), 80 deletions(-)

diff --git a/botorch/generation/gen.py b/botorch/generation/gen.py
index 6e6e047f05..0d70681fd5 100644
--- a/botorch/generation/gen.py
+++ b/botorch/generation/gen.py
@@ -22,10 +22,7 @@
 from botorch.acquisition import AcquisitionFunction
 from botorch.exceptions.errors import OptimizationGradientError
 from botorch.exceptions.warnings import OptimizationWarning
-from botorch.generation.utils import (
-    _convert_nonlinear_inequality_constraints,
-    _remove_fixed_features_from_optimization,
-)
+from botorch.generation.utils import _remove_fixed_features_from_optimization
 from botorch.logging import logger
 from botorch.optim.parameter_constraints import (
     _arrayify,
@@ -136,16 +133,6 @@ def gen_candidates_scipy(
     else:
         reduced_domain = None not in fixed_features.values()
 
-    if nonlinear_inequality_constraints:
-        if not isinstance(nonlinear_inequality_constraints, list):
-            raise ValueError(
-                "`nonlinear_inequality_constraints` must be a list of tuples, "
-                f"got {type(nonlinear_inequality_constraints)}."
-            )
-        nonlinear_inequality_constraints = _convert_nonlinear_inequality_constraints(
-            nonlinear_inequality_constraints
-        )
-
     if reduced_domain:
         _no_fixed_features = _remove_fixed_features_from_optimization(
             fixed_features=fixed_features,
diff --git a/botorch/generation/utils.py b/botorch/generation/utils.py
index 44bba19c24..a6cbaa67ef 100644
--- a/botorch/generation/utils.py
+++ b/botorch/generation/utils.py
@@ -6,12 +6,10 @@
 
 from __future__ import annotations
 
-import warnings
 from collections.abc import Callable
 from dataclasses import dataclass
 
 import torch
-
 from botorch.acquisition import AcquisitionFunction, FixedFeatureAcquisitionFunction
 from botorch.optim.parameter_constraints import (
     _generate_unfixed_lin_constraints,
@@ -20,34 +18,6 @@
 from torch import Tensor
 
 
-def _convert_nonlinear_inequality_constraints(
-    nonlinear_inequality_constraints: list[Callable | tuple[Callable, bool]],
-) -> list[tuple[Callable, bool]]:
-    """Convert legacy defintions of nonlinear inequality constraints into the new
-    format. Assumes intra-point constraints.
-    """
-    nlcs = []
-    legacy = False
-    # return nonlinear_inequality_constraints
-    for nlc in nonlinear_inequality_constraints:
-        if callable(nlc):
-            # old style --> convert
-            nlcs.append((nlc, True))
-            legacy = True
-        else:
-            nlcs.append(nlc)
-    if legacy:
-        warnings.warn(
-            "The `nonlinear_inequality_constraints` argument is expected "
-            "take a list of tuples. Passing a list of callables "
-            "will result in an error in future versions.",
-            DeprecationWarning,
-            stacklevel=3,
-        )
-
-    return nlcs
-
-
 def _flip_sub_unique(x: Tensor, k: int) -> Tensor:
     """Get the first k unique elements of a single-dimensional tensor, traversing
     the tensor from the back.
diff --git a/test/generation/test_utils.py b/test/generation/test_utils.py
index c5fd0e7c83..6c6176a2b9 100644
--- a/test/generation/test_utils.py
+++ b/test/generation/test_utils.py
@@ -12,7 +12,6 @@
 
 from botorch.acquisition import FixedFeatureAcquisitionFunction
 from botorch.generation.utils import (
-    _convert_nonlinear_inequality_constraints,
     _flip_sub_unique,
     _remove_fixed_features_from_optimization,
 )
@@ -20,27 +19,6 @@
 
 
 class TestGenerationUtils(BotorchTestCase):
-    def test_convert_nonlinear_inequality_constraints(self):
-        def nlc(x):
-            return x[..., 2]
-
-        def nlc2(x):
-            return x[..., 3]
-
-        nlcs = [nlc]
-        with self.assertWarns(DeprecationWarning):
-            new_nlcs = _convert_nonlinear_inequality_constraints(nlcs)
-        self.assertEqual(new_nlcs, [(nlc, True)])
-
-        nlcs = [(nlc, False)]
-        new_nlcs = _convert_nonlinear_inequality_constraints(nlcs)
-        self.assertEqual(new_nlcs, [(nlc, False)])
-
-        nlcs = [(nlc, False), nlc2]
-        with self.assertWarns(DeprecationWarning):
-            new_nlcs = _convert_nonlinear_inequality_constraints(nlcs)
-        self.assertEqual(new_nlcs, [(nlc, False), (nlc2, True)])
-
     def test_flip_sub_unique(self):
         for dtype in (torch.float, torch.double):
             tkwargs = {"device": self.device, "dtype": dtype}
diff --git a/test/optim/test_optimize.py b/test/optim/test_optimize.py
index fad021c61b..331b86be55 100644
--- a/test/optim/test_optimize.py
+++ b/test/optim/test_optimize.py
@@ -953,20 +953,6 @@ def nlc4(x):
             )
             self.assertEqual(candidates.size(), torch.Size([1, 3]))
 
-            # Constraints must be passed in as lists
-            with self.assertRaisesRegex(
-                ValueError,
-                "`nonlinear_inequality_constraints` must be a list of tuples, "
-                "got <class 'function'>.",
-            ):
-                optimize_acqf(
-                    acq_function=mock_acq_function,
-                    bounds=bounds,
-                    q=1,
-                    nonlinear_inequality_constraints=nlc1,
-                    num_restarts=num_restarts,
-                    batch_initial_conditions=batch_initial_conditions,
-                )
             # batch_initial_conditions must be feasible
             with self.assertRaisesRegex(
                 ValueError,

From 9c7521f9b43988661f2ff73afeb864e9e252b00b Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Fri, 15 Nov 2024 08:20:21 -0800
Subject: [PATCH 3/4] Silence some warnings in unit tests (#2628)

Summary:
Pull Request resolved: https://github.com/pytorch/botorch/pull/2628

Silences warnings for non-log EI acqfs and deprecated model converter code.
This will reduce the noise in test outputs.
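The silencing relies on Python's standard warning filters. A self-contained
sketch of the same pattern, with a made-up warning class standing in for
BoTorch's `NumericsWarning`:

    import warnings

    class DemoNumericsWarning(Warning):
        """Hypothetical stand-in for botorch.exceptions.warnings.NumericsWarning."""

    # `message` is a regex that must match the start of the warning text, and
    # `category` restricts the filter to one warning class, so unrelated
    # warnings still surface.
    warnings.filterwarnings(
        "ignore",
        message="DemoAcqf has known numerical issues",
        category=DemoNumericsWarning,
    )

    warnings.warn("DemoAcqf has known numerical issues", DemoNumericsWarning)  # silenced
    warnings.warn("DemoAcqf has known numerical issues", UserWarning)  # still emitted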
Reviewed By: esantorella

Differential Revision: D66004225

fbshipit-source-id: 680057d027e83f78fb4a2a2cc853c96f340383c1
---
 botorch/utils/testing.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/botorch/utils/testing.py b/botorch/utils/testing.py
index b318e6a1e2..6df7ad2a67 100644
--- a/botorch/utils/testing.py
+++ b/botorch/utils/testing.py
@@ -17,7 +17,11 @@
 
 import torch
 from botorch.acquisition.objective import PosteriorTransform
-from botorch.exceptions.warnings import BotorchTensorDimensionWarning, InputDataWarning
+from botorch.exceptions.warnings import (
+    BotorchTensorDimensionWarning,
+    InputDataWarning,
+    NumericsWarning,
+)
 from botorch.models.model import FantasizeMixin, Model
 from botorch.posteriors.gpytorch import GPyTorchPosterior
 from botorch.posteriors.posterior import Posterior
@@ -68,6 +72,16 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
             message=r"Data \(input features\) is not",
             category=InputDataWarning,
         )
+        warnings.filterwarnings(
+            "ignore",
+            message="has known numerical issues",
+            category=NumericsWarning,
+        )
+        warnings.filterwarnings(
+            "ignore",
+            message="Model converter code is deprecated",
+            category=DeprecationWarning,
+        )
 
     def assertAllClose(
         self,

From 3c2ce15ea2f0b24f0985ec83ed04b36188980f74 Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Fri, 15 Nov 2024 09:11:52 -0800
Subject: [PATCH 4/4] Deduplicate shared logic in
 prune_inferior_points(_multi_objective) (#2629)

Summary:
Pull Request resolved: https://github.com/pytorch/botorch/pull/2629

There was a lot of repetition between these two methods. This diff extracts
the shared parts into a helper method. Also cleaned up some Pyre complaints
while at it.

Reviewed By: Balandat

Differential Revision: D66006629

fbshipit-source-id: 81a1f23d80dfa095d71a79cbd552517a72fef0a8
---
 botorch/acquisition/multi_objective/utils.py |  78 ++++--------
 botorch/acquisition/utils.py                 | 122 ++++++++++++-------
 test/acquisition/test_utils.py               |   2 -
 3 files changed, 105 insertions(+), 97 deletions(-)

diff --git a/botorch/acquisition/multi_objective/utils.py b/botorch/acquisition/multi_objective/utils.py
index cca67ad55e..ca45c869e6 100644
--- a/botorch/acquisition/multi_objective/utils.py
+++ b/botorch/acquisition/multi_objective/utils.py
@@ -10,7 +10,6 @@
 
 from __future__ import annotations
 
-import math
 import warnings
 from collections.abc import Callable
 from math import ceil
 
 import torch
 from botorch.acquisition import monte_carlo  # noqa F401
-from botorch.acquisition.multi_objective.objective import (
-    IdentityMCMultiOutputObjective,
-    MCMultiOutputObjective,
-)
+from botorch.acquisition.multi_objective.objective import MCMultiOutputObjective
+from botorch.acquisition.utils import _prune_inferior_shared_processing
 from botorch.exceptions.errors import UnsupportedError
 from botorch.exceptions.warnings import BotorchWarning
 from botorch.models.deterministic import GenericDeterministicModel
-from botorch.models.fully_bayesian import MCMC_DIM
 from botorch.models.model import Model
-from botorch.sampling.get_sampler import get_sampler
 from botorch.sampling.pathwise.posterior_samplers import get_matheron_path_model
 from botorch.utils.multi_objective.box_decompositions.box_decomposition import (
     BoxDecomposition,
 )
 from botorch.utils.multi_objective.box_decompositions.dominated import (
     DominatedPartitioning,
 )
 from botorch.utils.multi_objective.pareto import is_non_dominated
-from botorch.utils.objective import compute_feasibility_indicator
 from botorch.utils.sampling import draw_sobol_samples
-from botorch.utils.transforms import is_ensemble
+from pyre_extensions import assert_is_instance
 from torch import Tensor
@@ -115,40 +109,14 @@ def prune_inferior_points_multi_objective(
         with `N_nz` the number of points in `X` that have non-zero (empirical,
         under `num_samples` samples) probability of being pareto optimal.
     """
-    if marginalize_dim is None and is_ensemble(model):
-        # TODO: Properly deal with marginalizing fully Bayesian models
-        marginalize_dim = MCMC_DIM
-
-    if X.ndim > 2:
-        # TODO: support batched inputs (req. dealing with ragged tensors)
-        raise UnsupportedError(
-            "Batched inputs `X` are currently unsupported by "
-            "prune_inferior_points_multi_objective"
-        )
-    if X.size(-2) == 0:
-        raise ValueError("X must have at least one point.")
-    if max_frac <= 0 or max_frac > 1.0:
-        raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}")
-    max_points = math.ceil(max_frac * X.size(-2))
-    with torch.no_grad():
-        posterior = model.posterior(X=X)
-        sampler = get_sampler(posterior, sample_shape=torch.Size([num_samples]))
-        samples = sampler(posterior)
-    if objective is None:
-        objective = IdentityMCMultiOutputObjective()
-    obj_vals = objective(samples, X=X)
-    if obj_vals.ndim > 3:
-        if obj_vals.ndim == 4 and marginalize_dim is not None:
-            obj_vals = obj_vals.mean(dim=marginalize_dim)
-        else:
-            # TODO: support batched inputs (req. dealing with ragged tensors)
-            raise UnsupportedError(
-                "Models with multiple batch dims are currently unsupported by"
-                " prune_inferior_points_multi_objective."
-            )
-    infeas = ~compute_feasibility_indicator(
+    max_points, obj_vals, infeas = _prune_inferior_shared_processing(
+        model=model,
+        X=X,
+        is_moo=True,
+        objective=objective,
         constraints=constraints,
-        samples=samples,
+        num_samples=num_samples,
+        max_frac=max_frac,
         marginalize_dim=marginalize_dim,
     )
     if infeas.any():
@@ -168,9 +136,9 @@
 def compute_sample_box_decomposition(
     pareto_fronts: Tensor,
-    partitioning: BoxDecomposition = DominatedPartitioning,
+    partitioning: type[BoxDecomposition] = DominatedPartitioning,
     maximize: bool = True,
-    num_constraints: int | None = 0,
+    num_constraints: int = 0,
 ) -> Tensor:
     r"""Computes the box decomposition associated with some sampled optimal
     objectives. This also supports the single-objective and constrained optimization
     the hyper-rectangles. The number `J` is the smallest number of boxes needed
     to partition all the Pareto samples.
     """
-    tkwargs = {"dtype": pareto_fronts.dtype, "device": pareto_fronts.device}
+    tkwargs: dict[str, Any] = {
+        "dtype": pareto_fronts.dtype,
+        "device": pareto_fronts.device,
+    }
     # We will later compute `norm.log_prob(NEG_INF)`, this is `-inf` if `NEG_INF` is
     # too small.
     NEG_INF = -1e10
@@ -214,16 +185,18 @@
     if M == 1:
         # Only consider a Pareto front with one element.
-        extreme_values = weight * torch.max(weight * pareto_fronts, dim=-2).values
+        extreme_values = assert_is_instance(
+            weight * torch.max(weight * pareto_fronts, dim=-2).values, Tensor
+        )
 
         ref_point = weight * ref_point.expand(extreme_values.shape)
 
         if maximize:
             hypercell_bounds = torch.stack(
-                [ref_point, extreme_values], axis=-2
+                [ref_point, extreme_values], dim=-2
             ).unsqueeze(-1)
         else:
             hypercell_bounds = torch.stack(
-                [extreme_values, ref_point], axis=-2
+                [extreme_values, ref_point], dim=-2
             ).unsqueeze(-1)
     else:
         bd_list = []
@@ -244,9 +217,7 @@
     # Add an extra box for the inequality constraint.
     if K > 0:
         # `num_pareto_samples x 2 x (J - 1) x K`
-        feasible_boxes = torch.zeros(
-            hypercell_bounds.shape[:-1] + torch.Size([K]), **tkwargs
-        )
+        feasible_boxes = torch.zeros(hypercell_bounds.shape[:-1] + (K,), **tkwargs)
 
         feasible_boxes[..., 0, :, :] = NEG_INF
         # `num_pareto_samples x 2 x (J - 1) x (M + K)`
@@ -254,7 +225,7 @@
 
         # `num_pareto_samples x 2 x 1 x (M + K)`
         infeasible_box = torch.zeros(
-            hypercell_bounds.shape[:-2] + torch.Size([1, M + K]), **tkwargs
+            hypercell_bounds.shape[:-2] + (1, M + K), **tkwargs
         )
         infeasible_box[..., 1, :, M:] = -NEG_INF
         infeasible_box[..., 0, :, 0:M] = NEG_INF
@@ -292,11 +263,12 @@ def random_search_optimizer(
         - A `num_points x M`-dim Tensor containing the collection of optimal
             objectives.
     """
-    tkwargs = {"dtype": bounds.dtype, "device": bounds.device}
+    tkwargs: dict[str, Any] = {"dtype": bounds.dtype, "device": bounds.device}
     weight = 1.0 if maximize else -1.0
     optimal_inputs = torch.tensor([], **tkwargs)
     optimal_outputs = torch.tensor([], **tkwargs)
     num_tries = 0
+    num_found = 0
     ratio = 2
     while ratio > 1 and num_tries < max_tries:
         X = draw_sobol_samples(bounds=bounds, n=pop_size, q=1).squeeze(-2)
diff --git a/botorch/acquisition/utils.py b/botorch/acquisition/utils.py
index d486629b76..a930488680 100644
--- a/botorch/acquisition/utils.py
+++ b/botorch/acquisition/utils.py
@@ -15,7 +15,6 @@
 import torch
 from botorch.acquisition.objective import (
-    IdentityMCObjective,
     MCAcquisitionObjective,
     PosteriorTransform,
     ScalarizedPosteriorTransform,
 )
@@ -34,6 +33,7 @@
 from botorch.utils.sampling import optimize_posterior_samples
 from botorch.utils.transforms import is_ensemble, normalize_indices
 from gpytorch.models import GP
+from pyre_extensions import none_throws
 from torch import Tensor
 
 
@@ -244,6 +244,76 @@ def objective(Y: Tensor, X: Tensor | None = None):
     return -(lb.clamp_max(0.0))
 
 
+def _prune_inferior_shared_processing(
+    model: Model,
+    X: Tensor,
+    is_moo: bool,
+    objective: MCAcquisitionObjective | None = None,
+    posterior_transform: PosteriorTransform | None = None,
+    constraints: list[Callable[[Tensor], Tensor]] | None = None,
+    num_samples: int = 2048,
+    max_frac: float = 1.0,
+    sampler: MCSampler | None = None,
+    marginalize_dim: int | None = None,
+) -> tuple[int, Tensor, Tensor]:
+    r"""Shared data processing for `prune_inferior_points` and
+    `prune_inferior_points_multi_objective`.
+
+    Returns:
+        - max_points: The maximum number of points to keep.
+        - obj_vals: The objective values of the points in `X`.
+        - infeas: A boolean tensor indicating feasibility of `X`.
+ """ + func_name = ( + "prune_inferior_points_multi_objective" if is_moo else "prune_inferior_points" + ) + if marginalize_dim is None and is_ensemble(model): + marginalize_dim = MCMC_DIM + + if X.ndim > 2: + raise UnsupportedError( + f"Batched inputs `X` are currently unsupported by `{func_name}`" + ) + if X.size(-2) == 0: + raise ValueError("X must have at least one point.") + if max_frac <= 0 or max_frac > 1.0: + raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}") + max_points = math.ceil(max_frac * X.size(-2)) + with torch.no_grad(): + posterior = model.posterior(X=X, posterior_transform=posterior_transform) + if sampler is None: + sampler = get_sampler( + posterior=posterior, sample_shape=torch.Size([num_samples]) + ) + samples = sampler(posterior) + if objective is not None: + obj_vals = objective(samples=samples, X=X) + elif is_moo: + obj_vals = samples + else: + obj_vals = samples.squeeze(-1) + if obj_vals.ndim > (2 + is_moo): + if obj_vals.ndim == (3 + is_moo) and marginalize_dim is not None: + if marginalize_dim < 0: + # Update `marginalize_dim` to be positive while accounting for + # removal of output dimension in SOO. + marginalize_dim = (not is_moo) + none_throws( + normalize_indices([marginalize_dim], d=obj_vals.ndim) + )[0] + obj_vals = obj_vals.mean(dim=marginalize_dim) + else: + raise UnsupportedError( + "Models with multiple batch dims are currently unsupported by " + f"`{func_name}`." + ) + infeas = ~compute_feasibility_indicator( + constraints=constraints, + samples=samples, + marginalize_dim=marginalize_dim, + ) + return max_points, obj_vals, infeas + + def prune_inferior_points( model: Model, X: Tensor, @@ -292,48 +362,16 @@ def prune_inferior_points( with `N_nz` the number of points in `X` that have non-zero (empirical, under `num_samples` samples) probability of being the best point. """ - if marginalize_dim is None and is_ensemble(model): - # TODO: Properly deal with marginalizing fully Bayesian models - marginalize_dim = MCMC_DIM - - if X.ndim > 2: - # TODO: support batched inputs (req. dealing with ragged tensors) - raise UnsupportedError( - "Batched inputs `X` are currently unsupported by prune_inferior_points" - ) - if X.size(-2) == 0: - raise ValueError("X must have at least one point.") - if max_frac <= 0 or max_frac > 1.0: - raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}") - max_points = math.ceil(max_frac * X.size(-2)) - with torch.no_grad(): - posterior = model.posterior(X=X, posterior_transform=posterior_transform) - if sampler is None: - sampler = get_sampler( - posterior=posterior, sample_shape=torch.Size([num_samples]) - ) - samples = sampler(posterior) - if objective is None: - objective = IdentityMCObjective() - obj_vals = objective(samples, X=X) - if obj_vals.ndim > 2: - if obj_vals.ndim == 3 and marginalize_dim is not None: - if marginalize_dim < 0: - # we do this again in compute_feasibility_indicator, but that will - # have no effect since marginalize_dim will be non-negative - marginalize_dim = ( - 1 + normalize_indices([marginalize_dim], d=obj_vals.ndim)[0] - ) - obj_vals = obj_vals.mean(dim=marginalize_dim) - else: - # TODO: support batched inputs (req. dealing with ragged tensors) - raise UnsupportedError( - "Models with multiple batch dims are currently unsupported by" - " prune_inferior_points." 
-            )
-    infeas = ~compute_feasibility_indicator(
+    max_points, obj_vals, infeas = _prune_inferior_shared_processing(
+        model=model,
+        X=X,
+        is_moo=False,
+        objective=objective,
+        posterior_transform=posterior_transform,
         constraints=constraints,
-        samples=samples,
+        num_samples=num_samples,
+        max_frac=max_frac,
+        sampler=sampler,
         marginalize_dim=marginalize_dim,
     )
     if infeas.any():
diff --git a/test/acquisition/test_utils.py b/test/acquisition/test_utils.py
index 7e4718150e..b8115ba0af 100644
--- a/test/acquisition/test_utils.py
+++ b/test/acquisition/test_utils.py
@@ -9,7 +9,6 @@
 from unittest.mock import patch
 
 import torch
-
 from botorch.acquisition.objective import (
     ExpectationPosteriorTransform,
     GenericMCObjective,
@@ -34,7 +33,6 @@
     UnsupportedError,
 )
 from botorch.models import SingleTaskGP
-
 from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
 from gpytorch.distributions import MultivariateNormal
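The refactor in this last patch is intended to be behavior-preserving for
callers of the two public functions. A hypothetical usage sketch, with a toy
model and data invented for illustration:

    import torch
    from botorch.acquisition.utils import prune_inferior_points
    from botorch.models import SingleTaskGP

    # Toy training data and model, invented for illustration.
    train_X = torch.rand(20, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y)

    # The public signature is unchanged; argument validation, posterior
    # sampling, and batch-dim marginalization now run inside
    # _prune_inferior_shared_processing for both this function and
    # prune_inferior_points_multi_objective.
    X_pruned = prune_inferior_points(
        model=model,
        X=torch.rand(50, 2, dtype=torch.double),
        num_samples=256,
        max_frac=0.25,
    )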