Commit 7a48320

fix gemma2 gated model test
1 parent 4b8d1f7 commit 7a48320

File tree

1 file changed: +2 -0 lines changed


tests/quantization/ggml/test_ggml.py

@@ -18,6 +18,7 @@
 from transformers import AddedToken, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
 from transformers.testing_utils import (
     require_gguf,
+    require_read_token,
     require_torch_gpu,
     slow,
     torch_device,
@@ -880,6 +881,7 @@ def test_gemma2_fp32(self):
         EXPECTED_TEXT = "Hello! 👋\n\nI'm a large language model"
         self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
 
+    @require_read_token
     def test_gemma2_weights_conversion_fp32(self):
         original_model = AutoModelForCausalLM.from_pretrained(
             self.original_gemma2_model_id,
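
The Gemma 2 checkpoints are gated on the Hugging Face Hub, so the weights-conversion test can only download the original model when a read token is available; decorating it with require_read_token (imported from transformers.testing_utils in the same change) lets the test skip cleanly on runners without credentials instead of failing at download time. As a rough illustration of the idea, below is a minimal sketch of how such a skip decorator could be written; this is an assumption for clarity, not the actual transformers implementation, and the token environment variable names are only a common convention.

# Minimal sketch only (assumption): how a require_read_token-style decorator
# could skip gated-model tests when no Hugging Face read token is configured.
# This is not the actual transformers.testing_utils implementation.
import os
import unittest

def require_read_token(test_case):
    """Skip the decorated test unless a read token is present in the environment."""
    token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN")
    return unittest.skipUnless(bool(token), "requires a Hugging Face read token (gated model)")(test_case)

With the decorator applied as in the diff above, test_gemma2_weights_conversion_fp32 is reported as skipped rather than erroring with an authentication failure when the gated Gemma 2 weights cannot be fetched.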
