2 files changed: +117 -0
wandb:
  run_name: zai-org/GLM-4.7
# api: vllm-docker
api: openai-compatible
base_url: http://gb-nvl-059-compute03:8000/v1
num_gpus: 8
batch_size: 128
model:
  use_wandb_artifacts: false
  pretrained_model_name_or_path: zai-org/GLM-4.7
  bfcl_model_id: "unified-oss-fc"
  size_category: "Large (30B+)"
  size: 358337791296
  release_date: 12/22/2025
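# Sanity check of the endpoint above before a run (assumed helper command, not part
# of this config; /v1/models is the standard OpenAI-compatible model listing route):
#   curl http://gb-nvl-059-compute03:8000/v1/models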

vllm:  # This config is not used because vLLM was launched manually; kept for the record
  vllm_tag: nightly-f1c2c20136cca6ea8798a64855eaf52ee9a42210
  lifecycle: always_on
  gpu_memory_utilization: 0.95
  reasoning_parser: glm45
  tool_call_parser: glm47
  enable_auto_tool_choice: true
  trust_remote_code: true
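# A minimal sketch of the manual launch recorded above (assumed; reconstructed from
# the flags in this section, with the port taken from base_url and tensor
# parallelism from num_gpus):
#   vllm serve zai-org/GLM-4.7 --port 8000 --tensor-parallel-size 8 \
#     --gpu-memory-utilization 0.95 --reasoning-parser glm45 \
#     --tool-call-parser glm47 --enable-auto-tool-choice --trust-remote-code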

generator:
  max_tokens: 202752
  temperature: 1.0
  top_p: 0.95
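# These sampling defaults map onto an OpenAI-compatible request roughly as follows
# (assumed sketch; the evaluation harness builds the actual request internally):
#   POST {base_url}/chat/completions
#   {"model": "zai-org/GLM-4.7", "max_tokens": 202752,
#    "temperature": 1.0, "top_p": 0.95, "messages": [...]}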

jaster:
  override_max_tokens: 202752

jbbq:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95

toxicity:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95

jtruthfulqa:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95

swebench:
  max_tokens: 202752

mtbench:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95
  temperature_override:
    writing: 1.0
    roleplay: 1.0
    extraction: 1.0
    math: 1.0
    coding: 1.0
    reasoning: 1.0
    stem: 1.0
    humanities: 1.0

bfcl:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95

hallulens:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95

hle:
  generator_config:
    max_tokens: 202752
    temperature: 1.0
    top_p: 0.95

arc_agi:
  max_output_tokens: 202752

---
wandb:
  run_name: mistralai/Mistral-Large-3-675B-Instruct-2512
# api: vllm-docker
api: openai-compatible
base_url: http://gb-nvl-059-compute03:8000/v1
num_gpus: 8
batch_size: 256
model:
  use_wandb_artifacts: false
  pretrained_model_name_or_path: mistralai/Mistral-Large-3-675B-Instruct-2512
  bfcl_model_id: "unified-oss-fc"
  size_category: "Large (30B+)"
  size: 676032104960
  release_date: 12/03/2025
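# Note: size appears to be the raw parameter count; 676,032,104,960 ≈ 676B, which
# is consistent with the 675B in the model name.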

vllm:  # This config is not used because vLLM was launched manually; kept for the record
  vllm_tag: v0.12.0
  lifecycle: always_on
  gpu_memory_utilization: 0.95
  enable_auto_tool_choice: true
  tool_call_parser: mistral
  extra_args:
    - "--config_format=mistral"
    - "--load_format=mistral"
    - "--tokenizer_mode=mistral"

generator:
  max_tokens: 262144
  temperature: 0.0
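# temperature: 0.0 requests greedy (deterministic) decoding, unlike the sampled
# settings used for GLM-4.7 above.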