add model test
kkscilife committed Jan 22, 2024
1 parent abbf036 · commit 135deca
Show file tree
Hide file tree
Showing 2 changed files with 85 additions and 0 deletions.
27 changes: 27 additions & 0 deletions .github/workflows/daily_tests.yaml
@@ -0,0 +1,27 @@
name: basic-model-tests-daily

on:
  pull_request:
  workflow_dispatch:
  schedule:
    - cron: '48 19 * * *'

env:
  SLURM_PARTITION: llm_s

jobs:
  HF_model:
    runs-on: [t_cluster]
    steps:
      - uses: actions/checkout@v3

      - name: load_hf_model
        run: |
          conda create -n internlm-model-latest --clone internlm-model-base
          source activate internlm-model-latest
          pip install transformers
          pip install sentencepiece
          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
      - name: clear_env
        run: |
          conda deactivate
          conda env remove --name internlm-model-latest
58 changes: 58 additions & 0 deletions tests/test_hf_model.py
@@ -0,0 +1,58 @@
import pytest
import torch

from transformers import AutoTokenizer, AutoModelForCausalLM

prompts = [
    "你好",
    "what's your name"
]

def assert_model(response):
    assert len(response) != 0
    assert "user" not in response
    assert "bot" not in response
    assert "UNUSED_TOKEN" not in response


class TestChat:
    @pytest.mark.parametrize("model_name", [
        "internlm/internlm2-chat-7b",
        "internlm/internlm2-chat-7b-sft",
    ])
    def test_demo_default(self, model_name):
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        # Set `torch_dtype=torch.float16` to load the model in float16; otherwise it will be loaded in float32 and might cause an OOM error.
        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, trust_remote_code=True).cuda()
        model = model.eval()
        for prompt in prompts:
            response, history = model.chat(tokenizer, prompt, history=[])
            print(response)
            assert_model(response)

        for prompt in prompts:
            length = 0
            for response, history in model.stream_chat(tokenizer, prompt, history=[]):
                print(response[length:], flush=True, end="")
                length = len(response)
            assert_model(response)


class TestBase:
    @pytest.mark.parametrize("model_name", [
        "internlm/internlm2-7b",
        "internlm/internlm2-base-7b",
    ])
    def test_demo_default(self, model_name):
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        # Set `torch_dtype=torch.float16` to load the model in float16; otherwise it will be loaded in float32 and might cause an OOM error.
        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, trust_remote_code=True).cuda()
        for prompt in prompts:
            inputs = tokenizer(prompt, return_tensors="pt")
            for k, v in inputs.items():
                inputs[k] = v.cuda()
            # top_p must lie in (0, 1] for nucleus sampling; 0.8 is used here.
            gen_kwargs = {"max_length": 16280, "top_p": 0.8, "temperature": 1.0, "do_sample": True, "repetition_penalty": 1.0}
            output = model.generate(**inputs, **gen_kwargs)
            output = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
            print(output)
            assert_model(output)
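
The same suite can also be exercised outside the workflow with a plain pytest call; as a sketch, assuming a CUDA-capable machine with transformers and sentencepiece installed, the chat tests alone could be selected by class name:

    pytest -s -v --color=yes ./tests/test_hf_model.py -k TestChat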
