grpo_argument.json
{
"logging_steps": 1,
"logging_dir": "vdl_log",
"train_datasets": "ppo-kk/34567ppl/train.jsonl",
"eval_datasets": "ppo-kk/5ppl/test.jsonl",
"label_key": "tgt",
"actor_model_name_or_path": "meta-llama/Meta-Llama-3.1-8B",
"reward_model_name_or_path": "",
"use_rm_server": true,
"reward_server": "http://127.0.0.1:8731",
"rl_algorithm": "grpo",
"bf16": 1,
"fp16_opt_level": "O2",
"do_train": 1,
"adam_beta1": 0.9,
"adam_beta2": 0.95,
"dataloader_drop_last": 0,
"gradient_accumulation_steps": 32,
"ignore_save_lr_and_optim": 1,
"kl_loss_coeff": 0.001,
"learning_rate": 5e-7,
"lr_scheduler_type": "cosine",
"max_grad_norm": 1.0,
"max_length": 4608,
"max_steps": 3600,
"min_learning_rate": 2e-7,
"num_train_epochs": 1,
"output_dir": "checkpoints/llama-grpo",
"per_device_train_batch_size": 8,
"save_steps": 400,
"save_strategy": "steps",
"save_total_limit": 5,
"update_iters": 1,
"warmup_ratio": 0.03,
"weight_decay": 0.01,
"do_eval": 1,
"evaluation_strategy": "steps",
"per_device_eval_batch_size": 4,
"eval_steps": 10,
"eval_mode": "",
"max_dec_len": 4096,
"max_prompt_len": 512,
"min_dec_len": 1,
"normalize_advantage": 1,
"normalize_reward": 1,
"num_return_sequences": 8,
"per_device_prompt_batch_size": 32,
"repetition_penalty": 1.0,
"temperature": 0.7,
"top_p": 1.0,
"fused_linear": 1,
"recompute": 1,
"recompute_granularity": "full",
"recompute_use_reentrant": 1,
"use_flash_attention": 1,
"use_fused_head_and_loss_fn": 0,
"use_fusemt": 1,
"offload_level": "freeze_model optimizer train_model",
"release_grads": 1,
"clip_range_ratio": 0.2,
"clip_range_score": 10.0,
"clip_range_value": 5.0,
"ptx_coeff": 16.0,
"disable_tqdm": 1,
"sharding_parallel_degree": 1,
"sharding": "stage1",
"tensor_parallel_degree": 8,
"tensor_parallel_output": 0,
"pipeline_parallel_degree": 1,
"pipeline_parallel_config": "disable_p2p_cache_shape",
"sequence_parallel": 0,
"seed": 23
}
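
This file is a single JSON object of training arguments for a GRPO run in which the actor is meta-llama/Meta-Llama-3.1-8B and rewards are fetched from an external reward server at http://127.0.0.1:8731 (use_rm_server is true, so reward_model_name_or_path is left empty). As a minimal sketch of how the rollout and update batch sizes in this config relate, the hypothetical Python snippet below (not part of any training script) loads the file and checks a few quantities derived directly from its values.

# Hypothetical helper: load grpo_argument.json and sanity-check derived sizes.
import json

with open("grpo_argument.json") as f:
    cfg = json.load(f)

# Responses sampled per device in one rollout phase:
# 32 prompts x 8 return sequences = 256 generations.
rollout_samples = cfg["per_device_prompt_batch_size"] * cfg["num_return_sequences"]

# Sequences consumed per device per optimizer update:
# 8 per micro-batch x 32 gradient-accumulation steps = 256.
update_samples = cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"]

# Prompt length plus maximum decode length must fit within max_length
# (512 + 4096 = 4608 here).
assert cfg["max_prompt_len"] + cfg["max_dec_len"] <= cfg["max_length"]

print(rollout_samples, update_samples)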