Commit 2b984ff

kkscilife and wangmengke authored
test(workflow): add ci workflow for acc test (InternLM#485)
* add ci workflow for acc test
* change train script
* add --kill-on-bad-exit=1 and change always to !cancelled

Co-authored-by: wangmengke <[email protected]>
1 parent 626ed0f commit 2b984ff
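
For context on the two mechanics this commit touches: in GitHub Actions, if: ${{ always() }} runs a dependent job even when the workflow run has been cancelled, while if: ${{ !cancelled() }} still runs after an upstream failure but is skipped on cancellation; on the Slurm side, --kill-on-bad-exit=1 tells srun to terminate every task in a step as soon as any task exits non-zero, so a failed rank fails the step quickly instead of leaving the surviving ranks hanging. A minimal sketch of the combined pattern (hypothetical job and test names, not taken from this repository):

jobs:
  check-requirements:
    runs-on: [t_cluster]
    steps:
      - run: echo "install changed requirements here"

  some-gpu-test:
    # Previously `always()`: the job ran even when the run was cancelled.
    # `!cancelled()` still runs after an upstream failure but respects cancellation.
    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    steps:
      - name: some-gpu-test
        run: |
          # --kill-on-bad-exit=1: Slurm kills all tasks in the step once any
          # task exits non-zero, so the CI job fails fast.
          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot \
            --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=1 \
            python ./tests/some_placeholder_test.py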

File tree: 9 files changed, +76 -24 lines


.github/workflows/demo_in_readme.yaml

Lines changed: 6 additions & 6 deletions
@@ -35,7 +35,7 @@ jobs:
          fi

  dataset-preparation:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    steps:
@@ -55,7 +55,7 @@ jobs:
          sh ./ci_scripts/data/tokenizer_alpaca.sh

  train:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    timeout-minutes: 30
@@ -92,7 +92,7 @@ jobs:
          rsync -av --remove-source-files $GITHUB_WORKSPACE/llm_ckpts ${{env.WORKSPACE_PREFIX}}/ci_clean_bak

  convert-model-then-load:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    timeout-minutes: 15
@@ -108,11 +108,11 @@ jobs:
          export PYTHONPATH=$PWD:$PYTHONPATH
          sh ./ci_scripts/model/convert_to_hf.sh
          cd ./hf_ckpt
-          srun -p ${SLURM_PARTITION} --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ../ci_scripts/model/loaded_as_transformer.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ../ci_scripts/model/loaded_as_transformer.py
          cd ..
          rsync -av --remove-source-files $GITHUB_WORKSPACE/hf_ckpt ${{env.WORKSPACE_PREFIX}}/ci_clean_bak
  load-chat-model-in-hf:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    timeout-minutes: 15
@@ -125,4 +125,4 @@ jobs:
      - name: chat-model-in-hf
        run: |
          source activate internlm-env-test
-          srun -p ${SLURM_PARTITION} --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ./ci_scripts/model/demo_load_7B_chat_model.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ./ci_scripts/model/demo_load_7B_chat_model.py

.github/workflows/e2e_test.yaml

Lines changed: 1 addition & 1 deletion
@@ -19,4 +19,4 @@ jobs:
      - name: training_8GPU
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training

.github/workflows/pr_merged.yaml

Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
+name: pr-merged
+on:
+  push:
+    branches:
+      - "develop"
+      - "main"
+    paths-ignore:
+      - "cmds/**"
+      - "**.md"
+env:
+  WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)
+  SLURM_PARTITION: llm_s
+
+jobs:
+  check-requirements:
+    runs-on: [t_cluster]
+    steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+      - name: check-requirements
+        run: |
+          changed_files=$(git diff --name-only -r HEAD^1 HEAD)
+          echo $changed_files
+          if [[ $changed_files =~ "runtime.txt" ]]; then
+            pip install -r requirements/runtime.txt
+          fi
+
+          if [[ $changed_files =~ "torch.txt" ]]; then
+            pip install -r requirements/torch.txt
+          fi
+
+
+  acc_tests:
+    if: ${{ !cancelled() }}
+    needs: check-requirements
+    runs-on: [t_cluster]
+    timeout-minutes: 30
+    steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+      - uses: actions/checkout@v3
+
+      - name: acc_tests
+        run: |
+          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
+          export PYTHONPATH=$PWD:$PYTHONPATH
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-acc-test-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python ./tests/test_training/train_CI.py --config ./tests/test_training/7B_check_acc.py
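
One detail of the new workflow worth a note (an explanatory aside, not part of the diff): WORKSPACE_PREFIX is stored as the literal string $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4). When the mask env step interpolates it via ${{env.WORKSPACE_PREFIX}}, the runner substitutes that literal text into the script and bash evaluates the command substitution, so ::add-mask:: registers a prefix of the runner's workspace path and hides it in the rest of the job's logs. A standalone sketch of the same step, with a hypothetical job name, shown only for illustration:

env:
  WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)

jobs:
  example-job:
    runs-on: [t_cluster]
    steps:
      - name: mask env
        run: |
          # The runner expands the expression to the literal env value; bash then
          # runs the command substitution, and ::add-mask:: masks the resulting
          # path prefix in all later log output for this job.
          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"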

.github/workflows/unit_tests.yaml

Lines changed: 4 additions & 4 deletions
@@ -42,7 +42,7 @@ jobs:


  unit_tests_core_pipeline:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    timeout-minutes: 20
@@ -56,10 +56,10 @@ jobs:
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          export PYTHONPATH=$PWD:$PYTHONPATH
-          srun -p ${SLURM_PARTITION} --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_core/test_pipeline.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_core/test_pipeline.py

  unit_tests_utils_storage_manager:
-    if: ${{ always() }}
+    if: ${{ !cancelled() }}
    needs: check-requirements
    runs-on: [t_cluster]
    timeout-minutes: 20
@@ -73,4 +73,4 @@ jobs:
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          export PYTHONPATH=$PWD:$PYTHONPATH
-          srun -p ${SLURM_PARTITION} --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_utils/test_storage_manager.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=internlm-ut-${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s -v ./tests/test_utils/test_storage_manager.py

.github/workflows/weekly_test.yaml

Lines changed: 9 additions & 9 deletions
@@ -17,7 +17,7 @@ jobs:
      - name: training_8GPU
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n8 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_8GPU" ./tests/test_training

  training_16GPU_8DP2TP:
    runs-on: [t_cluster]
@@ -29,7 +29,7 @@ jobs:
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          sed -i 's/^.*tensor=.*/ tensor=2,/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TP" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TP" ./tests/test_training

  training_16GPU_8DP2TPSP:
    runs-on: [t_cluster]
@@ -42,7 +42,7 @@ jobs:
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          sed -i 's/^.*tensor=.*/ tensor=2,/' ./configs/7B_sft.py
          sed -i 's/^.*sequence_parallel=.*/ sequence_parallel=True,/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TPSP" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2TPSP" ./tests/test_training

  training_16GPU_8DP2PP:
    runs-on: [t_cluster]
@@ -54,7 +54,7 @@ jobs:
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2),/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training

  training_16GPU_8DP2PP_InterleavedOverlap:
    runs-on: [t_cluster]
@@ -67,7 +67,7 @@ jobs:
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2, interleaved_overlap=True),/' ./configs/7B_sft.py
          sed -i 's/^.*num_chunks=.*/ num_chunks=2,/' ./configs/7B_sft.py
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP_InterleavedOverlap" ./tests/test_training
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP_InterleavedOverlap" ./tests/test_training

  unit_test_optimizer:
    runs-on: [t_cluster]
@@ -78,7 +78,7 @@ jobs:
      - name: test_optimizer
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_solver/test_optimizer.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_solver/test_optimizer.py

  unit_test_model:
    runs-on: [t_cluster]
@@ -89,14 +89,14 @@ jobs:
      - name: test_embedding_accuracy
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_embedding.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_embedding.py

      - name: test_model_internlm_accuracy
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_model_internlm.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_model_internlm.py

      - name: test_norm_accuracy
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
-          srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_norm.py
+          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -N 1 -n 1 --gres=gpu:8 python -m pytest -s ./tests/test_model/test_norm.py
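
A note on how the weekly matrix above works: each parallelism variant reuses the same ./configs/7B_sft.py and patches it in place with sed immediately before launching pytest, so a single config file covers the DP, TP, SP, and PP combinations. A sketch of one such step, copied from the hunk above with explanatory comments added (the exact replacement indentation follows the config file and is abbreviated here):

      - name: training_16GPU_8DP2PP
        run: |
          source /mnt/petrelfs/share_data/llm_env/env/llm-flash2.0
          # Rewrite the pipeline setting in the shared config so this run
          # exercises 8-way data parallel x 2-way pipeline parallel.
          sed -i 's/^.*pipeline=.*/ pipeline=dict(size=2),/' ./configs/7B_sft.py
          srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --quotatype=spot -n16 --ntasks-per-node=8 --cpus-per-task=4 --gpus-per-task=1 pytest -s -v --color=yes -m "training_16GPU_8DP2PP" ./tests/test_training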

ci_scripts/data/tokenizer_chinese.sh

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ if [[ ${num} -gt 0 ]]; then
    fi
fi

-srun -p ${SLURM_PARTITION} --quotatype=spot --job-name=$1 --gpus-per-task=1 python tools/tokenizer.py --text_input_path ${DATA} --bin_output_path ${RESULT}
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --quotatype=spot --job-name=$1 --gpus-per-task=1 python tools/tokenizer.py --text_input_path ${DATA} --bin_output_path ${RESULT}
[[ $? -ne 0 ]] && { echo "test tokenizer.py failed."; exit_code=$(($exit_code + 1)); }

file_list=($RESULT $RESULT_META)

ci_scripts/train/load_ckpt.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ if [[ ! -f ${file} ]]; then
    exit_code=$(($exit_code + 1))
fi

-srun -p ${SLURM_PARTITION} --exclusive --quotatype=spot --job-name=$2 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ${file}
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --exclusive --quotatype=spot --job-name=$2 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ${file}
[[ $? -ne 0 ]] && { echo "test slurm training failed."; exit_code=$(($exit_code + 1)); }

ci_scripts/train/slurm_train.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ if [[ -d ${CKPTS20_PATH} ]]; then
    fi
fi

-srun -p ${SLURM_PARTITION} --exclusive --quotatype=spot --job-name=$1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --exclusive --quotatype=spot --job-name=$1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./ci_scripts/train/ci_7B_sft.py
[[ $? -ne 0 ]] && { echo "test slurm training failed."; exit_code=$(($exit_code + 1)); }

num=$(num_files "${CKPTS20_OUTPUT}")

ci_scripts/train/torchrun.sh

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ if [[ -d ${CKPTS20_PATH} ]]; then
    fi
fi

-srun -p ${SLURM_PARTITION} --exclusive --quotatype=spot --job-name=$1 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher torch
+srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --exclusive --quotatype=spot --job-name=$1 -N 1 torchrun --nnodes=1 --nproc_per_node=8 --master_port=29501 train.py --config ./ci_scripts/train/ci_7B_sft.py --launcher torch
[[ $? -ne 0 ]] && { echo "test torch training failed."; exit_code=$(($exit_code + 1)); }

num=$(num_files "${CKPTS_OUTPUT}")
