diff --git a/docs/source/distributions.rst b/docs/source/distributions.rst
index a98bd862b..e8ceeb9ee 100644
--- a/docs/source/distributions.rst
+++ b/docs/source/distributions.rst
@@ -5,7 +5,7 @@ gpytorch.distributions
 ===================================
 
 GPyTorch distribution objects are essentially the same as torch distribution objects.
-For the most part, GpyTorch relies on torch's distribution library.
+For the most part, GPyTorch relies on torch's distribution library.
 However, we offer two custom distributions.
 
 We implement a custom :obj:`~gpytorch.distributions.MultivariateNormal` that accepts
diff --git a/examples/01_Exact_GPs/index.rst b/examples/01_Exact_GPs/index.rst
index ce91fd315..720795a4f 100644
--- a/examples/01_Exact_GPs/index.rst
+++ b/examples/01_Exact_GPs/index.rst
@@ -1,7 +1,7 @@
 Exact GPs (Regression)
 ========================
 
-Regression with a Gaussian noise model is the cannonical example of Gaussian processes.
+Regression with a Gaussian noise model is the canonical example of Gaussian processes.
 These examples will work for small to medium sized datasets (~2,000 data points).
 All examples here use exact GP inference.
 
diff --git a/gpytorch/kernels/kernel.py b/gpytorch/kernels/kernel.py
index b8e5f34ec..69dab421c 100644
--- a/gpytorch/kernels/kernel.py
+++ b/gpytorch/kernels/kernel.py
@@ -236,7 +236,7 @@ def forward(
     ) -> Union[Tensor, LinearOperator]:
         r"""
         Computes the covariance between :math:`\mathbf x_1` and :math:`\mathbf x_2`.
-        This method should be imlemented by all Kernel subclasses.
+        This method should be implemented by all Kernel subclasses.
 
         :param x1: First set of data (... x N x D).
         :param x2: Second set of data (... x M x D).
diff --git a/gpytorch/test/base_keops_test_case.py b/gpytorch/test/base_keops_test_case.py
index fb261c860..ca32b4d64 100644
--- a/gpytorch/test/base_keops_test_case.py
+++ b/gpytorch/test/base_keops_test_case.py
@@ -66,7 +66,7 @@ def test_forward_x1_neq_x2(self, use_keops=True, ard=False, **kwargs):
             # The patch makes sure that we're actually using KeOps
             k1 = kern1(x1, x2).to_dense()
             k2 = kern2(x1, x2).to_dense()
-            self.assertLess(torch.norm(k1 - k2), 1e-4)
+            self.assertLess(torch.norm(k1 - k2), 1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
@@ -86,7 +86,7 @@ def test_batch_matmul(self, use_keops=True, **kwargs):
             # The patch makes sure that we're actually using KeOps
             res1 = kern1(x1, x1).matmul(rhs)
             res2 = kern2(x1, x1).matmul(rhs)
-            self.assertLess(torch.norm(res1 - res2), 1e-4)
+            self.assertLess(torch.norm(res1 - res2), 1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
@@ -115,7 +115,7 @@ def test_gradient(self, use_keops=True, ard=False, **kwargs):
             # stack all gradients into a tensor
             grad_s1 = torch.vstack(torch.autograd.grad(s1, [*kern1.hyperparameters()]))
             grad_s2 = torch.vstack(torch.autograd.grad(s2, [*kern2.hyperparameters()]))
-            self.assertAllClose(grad_s1, grad_s2, rtol=1e-4, atol=1e-5)
+            self.assertAllClose(grad_s1, grad_s2, rtol=1e-3, atol=1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
diff --git a/setup.py b/setup.py
index f86a41a7c..df580c8aa 100644
--- a/setup.py
+++ b/setup.py
@@ -39,6 +39,7 @@ def find_version(*file_paths):
 torch_min = "1.11"
 
 install_requires = [
+    "mpmath>=0.19,<=1.3",  # avoid incompatibility with torch+sympy with mpmath 1.4
     "scikit-learn",
     "scipy",
     "linear_operator>=0.5.2",
diff --git a/test/examples/test_svgp_gp_classification.py b/test/examples/test_svgp_gp_classification.py
index 1645b8c70..8a6efe689 100644
--- a/test/examples/test_svgp_gp_classification.py
+++ b/test/examples/test_svgp_gp_classification.py
@@ -16,7 +16,7 @@
 
 
 def train_data(cuda=False):
-    train_x = torch.linspace(0, 1, 260)
+    train_x = torch.linspace(0, 1, 150)
     train_y = torch.cos(train_x * (2 * math.pi)).gt(0).float()
     if cuda:
         return train_x.cuda(), train_y.cuda()
@@ -49,7 +49,7 @@ class TestSVGPClassification(BaseTestCase, unittest.TestCase):
     def test_classification_error(self, cuda=False, mll_cls=gpytorch.mlls.VariationalELBO):
         train_x, train_y = train_data(cuda=cuda)
         likelihood = BernoulliLikelihood()
-        model = SVGPClassificationModel(torch.linspace(0, 1, 25))
+        model = SVGPClassificationModel(torch.linspace(0, 1, 64))
         mll = mll_cls(likelihood, model, num_data=len(train_y))
         if cuda:
             likelihood = likelihood.cuda()
@@ -59,12 +59,12 @@ def test_classification_error(self, cuda=False, mll_cls=gpytorch.mlls.Variationa
         # Find optimal model hyperparameters
         model.train()
         likelihood.train()
-        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.1)
+        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.03)
 
         _wrapped_cg = MagicMock(wraps=linear_operator.utils.linear_cg)
         _cg_mock = patch("linear_operator.utils.linear_cg", new=_wrapped_cg)
         with _cg_mock as cg_mock:
-            for _ in range(400):
+            for _ in range(100):
                 optimizer.zero_grad()
                 output = model(train_x)
                 loss = -mll(output, train_y)