From bf2bd2499c506688148554cc9a735244b3202ca7 Mon Sep 17 00:00:00 2001
From: Arthur Zucker
Date: Mon, 13 Jan 2025 18:16:49 +0100
Subject: [PATCH] style fixup, fix doc builder

---
 docs/source/en/_toctree.yml                 | 2 ++
 tests/models/helium/test_modeling_helium.py | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index 529b113cf1e5..40780d24d51c 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -452,6 +452,8 @@
         title: Granite
       - local: model_doc/granitemoe
         title: GraniteMoe
+      - local: model_doc/helium
+        title: Helium
       - local: model_doc/herbert
         title: HerBERT
       - local: model_doc/ibert
diff --git a/tests/models/helium/test_modeling_helium.py b/tests/models/helium/test_modeling_helium.py
index 06e738338774..3ad2cf736678 100644
--- a/tests/models/helium/test_modeling_helium.py
+++ b/tests/models/helium/test_modeling_helium.py
@@ -98,9 +98,9 @@ def test_model_2b(self):
             "Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"
         ]

-        model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, revision="refs/pr/1").to(
-            torch_device
-        )
+        model = AutoModelForCausalLM.from_pretrained(
+            model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, revision="refs/pr/1"
+        ).to(torch_device)
         tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")

         inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)