Merge branch 'master' into patch-4

Balandat authored Mar 19, 2024
2 parents ebecc74 + 5b49fbb, commit 33c4821
Showing 6 changed files with 11 additions and 10 deletions.
2 changes: 1 addition & 1 deletion docs/source/distributions.rst
@@ -5,7 +5,7 @@ gpytorch.distributions
 ===================================
 
 GPyTorch distribution objects are essentially the same as torch distribution objects.
-For the most part, GpyTorch relies on torch's distribution library.
+For the most part, GPyTorch relies on torch's distribution library.
 However, we offer two custom distributions.
 
 We implement a custom :obj:`~gpytorch.distributions.MultivariateNormal` that accepts
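
The custom MultivariateNormal referenced in the context above is interchangeable with torch.distributions.MultivariateNormal. A minimal usage sketch (not part of the diff; the covariance may also be a LinearOperator rather than a dense tensor):

    import torch
    from gpytorch.distributions import MultivariateNormal

    # A 4-dimensional Gaussian with zero mean and identity covariance
    mean = torch.zeros(4)
    covar = torch.eye(4)
    mvn = MultivariateNormal(mean, covar)

    sample = mvn.rsample()           # reparameterized sample, shape (4,)
    log_prob = mvn.log_prob(sample)  # scalar log-density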
2 changes: 1 addition & 1 deletion examples/01_Exact_GPs/index.rst
@@ -1,7 +1,7 @@
 Exact GPs (Regression)
 ========================
 
-Regression with a Gaussian noise model is the cannonical example of Gaussian processes.
+Regression with a Gaussian noise model is the canonical example of Gaussian processes.
 These examples will work for small to medium sized datasets (~2,000 data points).
 All examples here use exact GP inference.
 
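
For context, the canonical exact-GP regression setup these examples build on looks roughly like the following (a sketch modeled on the GPyTorch tutorials, not part of the diff):

    import math
    import torch
    import gpytorch

    class ExactGPModel(gpytorch.models.ExactGP):
        def __init__(self, train_x, train_y, likelihood):
            super().__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

    # Toy dataset, well inside the "small to medium" regime the docs describe
    train_x = torch.linspace(0, 1, 100)
    train_y = torch.sin(train_x * (2 * math.pi)) + 0.1 * torch.randn(train_x.size())
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    model = ExactGPModel(train_x, train_y, likelihood)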
2 changes: 1 addition & 1 deletion gpytorch/kernels/kernel.py
@@ -236,7 +236,7 @@ def forward(
     ) -> Union[Tensor, LinearOperator]:
         r"""
         Computes the covariance between :math:`\mathbf x_1` and :math:`\mathbf x_2`.
-        This method should be imlemented by all Kernel subclasses.
+        This method should be implemented by all Kernel subclasses.
 
         :param x1: First set of data (... x N x D).
         :param x2: Second set of data (... x M x D).
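
A subclass implementing the documented contract might look like this sketch (SimpleLinearKernel is a hypothetical name for illustration, not a GPyTorch class):

    import torch
    from gpytorch.kernels import Kernel

    class SimpleLinearKernel(Kernel):
        # Hypothetical example of the forward() contract documented above
        def forward(self, x1, x2, diag=False, **params):
            # Covariance as a plain inner product: K = x1 @ x2^T, shape (... x N x M)
            prod = x1 @ x2.transpose(-2, -1)
            if diag:
                # Only the diagonal is requested (assumes N == M)
                return prod.diagonal(dim1=-2, dim2=-1)
            return prod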
6 changes: 3 additions & 3 deletions gpytorch/test/base_keops_test_case.py
@@ -66,7 +66,7 @@ def test_forward_x1_neq_x2(self, use_keops=True, ard=False, **kwargs):
             # The patch makes sure that we're actually using KeOps
             k1 = kern1(x1, x2).to_dense()
             k2 = kern2(x1, x2).to_dense()
-            self.assertLess(torch.norm(k1 - k2), 1e-4)
+            self.assertLess(torch.norm(k1 - k2), 1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
@@ -86,7 +86,7 @@ def test_batch_matmul(self, use_keops=True, **kwargs):
             # The patch makes sure that we're actually using KeOps
             res1 = kern1(x1, x1).matmul(rhs)
             res2 = kern2(x1, x1).matmul(rhs)
-            self.assertLess(torch.norm(res1 - res2), 1e-4)
+            self.assertLess(torch.norm(res1 - res2), 1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
@@ -115,7 +115,7 @@ def test_gradient(self, use_keops=True, ard=False, **kwargs):
             # stack all gradients into a tensor
             grad_s1 = torch.vstack(torch.autograd.grad(s1, [*kern1.hyperparameters()]))
             grad_s2 = torch.vstack(torch.autograd.grad(s2, [*kern2.hyperparameters()]))
-            self.assertAllClose(grad_s1, grad_s2, rtol=1e-4, atol=1e-5)
+            self.assertAllClose(grad_s1, grad_s2, rtol=1e-3, atol=1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
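
The tolerances are loosened from 1e-4 to 1e-3 because the KeOps and plain PyTorch code paths accumulate floating-point error differently. A standalone version of the forward comparison might look like this sketch (assumes pykeops is installed; both kernels carry identical default hyperparameters, so their outputs should agree up to numerical error):

    import torch
    from gpytorch.kernels import RBFKernel
    from gpytorch.kernels.keops import RBFKernel as KeOpsRBFKernel

    x1 = torch.randn(100, 3)
    x2 = torch.randn(50, 3)

    k1 = RBFKernel()(x1, x2).to_dense()       # plain PyTorch path
    k2 = KeOpsRBFKernel()(x1, x2).to_dense()  # KeOps path
    assert torch.norm(k1 - k2) < 1e-3         # the updated, looser tolerance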
1 change: 1 addition & 0 deletions setup.py
@@ -39,6 +39,7 @@ def find_version(*file_paths):
 
 torch_min = "1.11"
 install_requires = [
+    "mpmath>=0.19,<=1.3", # avoid incompatibiltiy with torch+sympy with mpmath 1.4
     "scikit-learn",
     "scipy",
     "linear_operator>=0.5.2",
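
Per the inline comment, the new pin caps mpmath at 1.3 so that torch's sympy dependency keeps working. The specifier's behavior can be sanity-checked with the packaging library (an illustration, not part of the diff):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=0.19,<=1.3")
    print("1.3.0" in spec)  # True: still allowed
    print("1.4.0" in spec)  # False: the incompatible release is excluded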
8 changes: 4 additions & 4 deletions test/examples/test_svgp_gp_classification.py
@@ -16,7 +16,7 @@
 
 
 def train_data(cuda=False):
-    train_x = torch.linspace(0, 1, 260)
+    train_x = torch.linspace(0, 1, 150)
     train_y = torch.cos(train_x * (2 * math.pi)).gt(0).float()
     if cuda:
         return train_x.cuda(), train_y.cuda()
@@ -49,7 +49,7 @@ class TestSVGPClassification(BaseTestCase, unittest.TestCase):
     def test_classification_error(self, cuda=False, mll_cls=gpytorch.mlls.VariationalELBO):
         train_x, train_y = train_data(cuda=cuda)
         likelihood = BernoulliLikelihood()
-        model = SVGPClassificationModel(torch.linspace(0, 1, 25))
+        model = SVGPClassificationModel(torch.linspace(0, 1, 64))
         mll = mll_cls(likelihood, model, num_data=len(train_y))
         if cuda:
             likelihood = likelihood.cuda()
@@ -59,12 +59,12 @@ def test_classification_error(self, cuda=False, mll_cls=gpytorch.mlls.VariationalELBO):
         # Find optimal model hyperparameters
         model.train()
         likelihood.train()
-        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.1)
+        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.03)
 
         _wrapped_cg = MagicMock(wraps=linear_operator.utils.linear_cg)
         _cg_mock = patch("linear_operator.utils.linear_cg", new=_wrapped_cg)
         with _cg_mock as cg_mock:
-            for _ in range(400):
+            for _ in range(100):
                 optimizer.zero_grad()
                 output = model(train_x)
                 loss = -mll(output, train_y)
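
SVGPClassificationModel itself is not shown in this diff; a plausible sketch, following the standard GPyTorch variational setup, is:

    import gpytorch
    from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy

    class SVGPClassificationModel(gpytorch.models.ApproximateGP):
        def __init__(self, inducing_points):
            # One variational parameter set per inducing point
            variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0))
            variational_strategy = VariationalStrategy(
                self, inducing_points, variational_distribution, learn_inducing_locations=True
            )
            super().__init__(variational_strategy)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

The revised settings (fewer training points, more inducing points, a smaller learning rate, and a quarter of the iterations) presumably trade per-step aggressiveness for optimization stability while keeping the test fast.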
