Skip to content

Commit e168e3a

Browse files
authored
[Tests] Use requires_gpu, fix missing gpu test skip, add explicit test for gpu from gha (#1264)
## Purpose ## * Update all tests to use `requires_gpu` decorator * Add GPU mark skip for `test_compressor_stacking`, which requires a GPU * Add an explicit GPU test for GHA, so as to unambiguously catch situations where CUDA is not properly installed on a runner --------- Signed-off-by: Kyle Sayers <[email protected]>
1 parent d783c26 commit e168e3a

File tree

3 files changed

+21
-5
lines changed

3 files changed

+21
-5
lines changed

tests/llmcompressor/pytorch/utils/test_helpers.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
tensors_to_device,
1515
tensors_to_precision,
1616
)
17+
from tests.testing_utils import requires_gpu
1718

1819

1920
@pytest.mark.unit
@@ -55,6 +56,7 @@ def test_tensors_to_device_cpu(tensors):
5556
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
5657
reason="Skipping pytorch tests",
5758
)
59+
@requires_gpu
5860
@pytest.mark.parametrize(
5961
"tensors",
6062
[
@@ -69,7 +71,6 @@ def test_tensors_to_device_cpu(tensors):
6971
[[torch.randn(1, 8)], torch.randn(8, 8)],
7072
],
7173
)
72-
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires cuda availability")
7374
def test_tensors_to_device_cuda(tensors):
7475
out = tensors_to_device(tensors, "cuda")
7576

@@ -364,6 +365,7 @@ def test_tensors_module_forward(module, tensors, check_feat_lab_inp):
364365
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
365366
reason="Skipping pytorch tests",
366367
)
368+
@requires_gpu
367369
@pytest.mark.parametrize(
368370
"module,tensors,check_feat_lab_inp",
369371
[
@@ -417,7 +419,6 @@ def test_tensors_module_forward(module, tensors, check_feat_lab_inp):
417419
),
418420
],
419421
)
420-
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires cuda availability")
421422
def test_tensors_module_forward_cuda(module, tensors, check_feat_lab_inp):
422423
module = module.to("cuda")
423424
tensors = tensors_to_device(tensors, "cuda")
@@ -471,6 +472,7 @@ def test_tensor_sparsity(tensor, dim, expected_sparsity):
471472
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
472473
reason="Skipping pytorch tests",
473474
)
475+
@requires_gpu
474476
@pytest.mark.parametrize(
475477
"tensor,dim,expected_sparsity",
476478
[
@@ -490,7 +492,6 @@ def test_tensor_sparsity(tensor, dim, expected_sparsity):
490492
),
491493
],
492494
)
493-
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires cuda availability")
494495
def test_tensor_sparsity_cuda(tensor, dim, expected_sparsity):
495496
tensor = tensor.to("cuda")
496497
sparsity = tensor_sparsity(tensor, dim)
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
import os
2+
3+
import pytest
4+
import torch
5+
6+
7+
@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") != "true", reason="Only run for GHA")
8+
def test_has_gpu():
9+
"""
10+
This test exists purely to raise an error if
11+
a runner performs transformers tests without a GPU
12+
"""
13+
assert torch.cuda.is_available()

tests/llmcompressor/transformers/sparsification/test_compress_tensor_utils.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030
modify_save_pretrained,
3131
patch_tied_tensors_bug,
3232
)
33+
from tests.testing_utils import requires_gpu
3334

3435

3536
@pytest.mark.parametrize(
@@ -275,7 +276,7 @@ def test_model_reload(offload, torch_dtype, tie_word_embeddings, device_map, tmp
275276
assert torch.equal(model_dict[key].cpu(), reloaded_dict[key].cpu())
276277

277278

278-
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires gpu")
279+
@requires_gpu
279280
@pytest.mark.parametrize(
280281
"offload,torch_dtype,tie_word_embeddings,device_map",
281282
[
@@ -340,7 +341,7 @@ def test_model_shared_tensors(
340341
assert not torch.equal(lm_head, embed_tokens)
341342

342343

343-
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires gpu")
344+
@requires_gpu
344345
@pytest.mark.parametrize(
345346
"offload,torch_dtype,tie_word_embeddings,device_map",
346347
[
@@ -356,6 +357,7 @@ def test_model_shared_tensors_gpu(
356357
)
357358

358359

360+
@requires_gpu
359361
@pytest.mark.parametrize(
360362
"model_stub, recipe, sparse_format, quant_format",
361363
[

0 commit comments

Comments (0)