diff --git a/.github/workflows/daily_tests.yaml b/.github/workflows/daily_tests.yaml
index 76dc73d1..0e1562f8 100644
--- a/.github/workflows/daily_tests.yaml
+++ b/.github/workflows/daily_tests.yaml
@@ -23,6 +23,7 @@ jobs:
         run: |
           conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
+          # TODO: test other versions of transformers
           pip install transformers
           pip install sentencepiece
           srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
diff --git a/tests/test_hf_model.py b/tests/test_hf_model.py
index 871bc537..897b205d 100644
--- a/tests/test_hf_model.py
+++ b/tests/test_hf_model.py
@@ -7,8 +7,6 @@
 def assert_model(response):
     assert len(response) != 0
-    assert "user" not in response
-    assert "bot" not in response
     assert "UNUSED_TOKEN" not in response
@@ -69,7 +67,7 @@ def test_demo_default(self, model_name):
         for k, v in inputs.items():
             inputs[k] = v.cuda()
         gen_kwargs = {
-            "max_length": 16280,
+            "max_length": 128,
             "top_p": 10,
             "temperature": 1.0,
             "do_sample": True,
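For context on the `gen_kwargs` change above, here is a minimal sketch (not part of the diff) of how a Hugging Face generation test typically consumes these arguments via `model.generate`. The checkpoint name, prompt, and the `top_p`/`repetition_penalty` values below are placeholder assumptions; the actual test parametrizes `model_name` and builds `inputs` before the snippet shown in the hunk.

```python
# Hypothetical usage sketch: how the reduced max_length (128, was 16280) flows
# into generation in a Hugging Face test. Requires a CUDA GPU and model download.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "internlm/internlm2-chat-7b"  # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, trust_remote_code=True
).cuda()

inputs = tokenizer(["Hello"], return_tensors="pt")
for k, v in inputs.items():
    inputs[k] = v.cuda()

gen_kwargs = {
    "max_length": 128,  # the value lowered by this diff; caps prompt + generated tokens
    "top_p": 0.8,       # placeholder; the test's own dict differs
    "temperature": 1.0,
    "do_sample": True,
}
output = model.generate(**inputs, **gen_kwargs)
response = tokenizer.decode(output[0], skip_special_tokens=True)

# Mirrors the trimmed assert_model checks from the second hunk.
assert len(response) != 0
assert "UNUSED_TOKEN" not in response
```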