Skip to content

Commit 1f53f4b

Browse files
committed
Run isort
1 parent 1de5210 commit 1f53f4b

File tree

1 file changed

+4
-0
lines changed

1 file changed

+4
-0
lines changed

tests/hf_olmo/modeling_olmo_test.py

+4
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ def test_olmo_model(model_path: str):
2828
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Requires CUDA devices")
2929
def test_flash_attention_2(model_path: str):
3030
from transformers import AutoModelForCausalLM, AutoTokenizer
31+
3132
import hf_olmo # noqa: F401
3233

3334
hf_model = AutoModelForCausalLM.from_pretrained(model_path)
@@ -45,6 +46,7 @@ def test_flash_attention_2(model_path: str):
4546

4647
def test_sdpa(model_path: str):
4748
from transformers import AutoModelForCausalLM, AutoTokenizer
49+
4850
import hf_olmo # noqa: F401
4951

5052
hf_model = AutoModelForCausalLM.from_pretrained(model_path)
@@ -62,6 +64,7 @@ def test_sdpa(model_path: str):
6264

6365
def test_gradient_checkpointing(model_path: str):
6466
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel
67+
6568
import hf_olmo # noqa: F401
6669

6770
hf_model: PreTrainedModel = AutoModelForCausalLM.from_pretrained(model_path)
@@ -81,6 +84,7 @@ def test_gradient_checkpointing(model_path: str):
8184

8285
def test_gradient_checkpointing_disable(model_path: str):
8386
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedModel
87+
8488
import hf_olmo # noqa: F401
8589

8690
hf_model: PreTrainedModel = AutoModelForCausalLM.from_pretrained(model_path)

0 commit comments

Comments (0)