Skip to content

Commit a65519b

Browse files
chore: add Synthetic provider with updated model configs (#100)
1 parent 9f23bd1 commit a65519b

File tree

3 files changed

+385
-0
lines changed

3 files changed

+385
-0
lines changed
Lines changed: 375 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,375 @@
1+
{
2+
"name": "Synthetic",
3+
"id": "synthetic",
4+
"type": "openai-compat",
5+
"api_key": "$SYNTHETIC_API_KEY",
6+
"api_endpoint": "https://api.synthetic.new/openai/v1",
7+
"default_large_model_id": "hf:zai-org/GLM-4.6",
8+
"default_small_model_id": "hf:openai/gpt-oss-120b",
9+
"models": [
10+
{
11+
"id": "hf:deepseek-ai/DeepSeek-R1",
12+
"name": "DeepSeek R1",
13+
"cost_per_1m_in": 0.55,
14+
"cost_per_1m_out": 2.19,
15+
"cost_per_1m_in_cached": 0.07,
16+
"cost_per_1m_out_cached": 0.14,
17+
"context_window": 128000,
18+
"default_max_tokens": 65536,
19+
"can_reason": true,
20+
"reasoning_levels": [
21+
"low",
22+
"medium",
23+
"high"
24+
],
25+
"default_reasoning_effort": "medium",
26+
"supports_attachments": false
27+
},
28+
{
29+
"id": "hf:deepseek-ai/DeepSeek-R1-0528",
30+
"name": "DeepSeek R1 0528",
31+
"cost_per_1m_in": 3.0,
32+
"cost_per_1m_out": 8.0,
33+
"cost_per_1m_in_cached": 0.07,
34+
"cost_per_1m_out_cached": 0.14,
35+
"context_window": 128000,
36+
"default_max_tokens": 65536,
37+
"can_reason": true,
38+
"reasoning_levels": [
39+
"low",
40+
"medium",
41+
"high"
42+
],
43+
"default_reasoning_effort": "medium",
44+
"supports_attachments": false
45+
},
46+
{
47+
"id": "hf:deepseek-ai/DeepSeek-V3",
48+
"name": "DeepSeek V3",
49+
"cost_per_1m_in": 1.25,
50+
"cost_per_1m_out": 1.25,
51+
"cost_per_1m_in_cached": 0.07,
52+
"cost_per_1m_out_cached": 0.14,
53+
"context_window": 128000,
54+
"default_max_tokens": 8192,
55+
"can_reason": false,
56+
"supports_attachments": false
57+
},
58+
{
59+
"id": "hf:deepseek-ai/DeepSeek-V3-0324",
60+
"name": "DeepSeek V3 0324",
61+
"cost_per_1m_in": 1.2,
62+
"cost_per_1m_out": 1.2,
63+
"cost_per_1m_in_cached": 0.07,
64+
"cost_per_1m_out_cached": 0.14,
65+
"context_window": 128000,
66+
"default_max_tokens": 8192,
67+
"can_reason": false,
68+
"supports_attachments": false
69+
},
70+
{
71+
"id": "hf:deepseek-ai/DeepSeek-V3.1",
72+
"name": "DeepSeek V3.1",
73+
"cost_per_1m_in": 0.56,
74+
"cost_per_1m_out": 1.68,
75+
"cost_per_1m_in_cached": 0.07,
76+
"cost_per_1m_out_cached": 0.14,
77+
"context_window": 128000,
79+
"default_max_tokens": 8192,
80+
"can_reason": true,
81+
"reasoning_levels": [
82+
"low",
83+
"medium",
84+
"high"
85+
],
86+
"default_reasoning_effort": "medium",
87+
"supports_attachments": false
88+
},
89+
{
90+
"id": "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
91+
"name": "DeepSeek V3.1 Terminus",
92+
"cost_per_1m_in": 1.2,
93+
"cost_per_1m_out": 1.2,
94+
"cost_per_1m_in_cached": 0.07,
95+
"cost_per_1m_out_cached": 0.14,
96+
"context_window": 128000,
97+
"default_max_tokens": 8192,
98+
"can_reason": true,
99+
"reasoning_levels": [
100+
"low",
101+
"medium",
102+
"high"
103+
],
104+
"default_reasoning_effort": "medium",
105+
"supports_attachments": false
106+
},
107+
{
108+
"id": "hf:meta-llama/Llama-3.1-405B-Instruct",
109+
"name": "Llama 3.1 405B Instruct",
110+
"cost_per_1m_in": 3.0,
111+
"cost_per_1m_out": 3.0,
112+
"cost_per_1m_in_cached": 0.27,
113+
"cost_per_1m_out_cached": 0.55,
114+
"context_window": 131072,
115+
"default_max_tokens": 4096,
116+
"can_reason": true,
117+
"reasoning_levels": [
118+
"low",
119+
"medium",
120+
"high"
121+
],
122+
"default_reasoning_effort": "medium",
123+
"supports_attachments": false
124+
},
125+
{
126+
"id": "hf:meta-llama/Llama-3.1-70B-Instruct",
127+
"name": "Llama 3.1 70B Instruct",
128+
"cost_per_1m_in": 0.9,
129+
"cost_per_1m_out": 0.9,
130+
"cost_per_1m_in_cached": 0.59,
131+
"cost_per_1m_out_cached": 1.1,
132+
"context_window": 128000,
134+
"default_max_tokens": 4096,
135+
"can_reason": false,
136+
"supports_attachments": false
137+
},
138+
{
139+
"id": "hf:meta-llama/Llama-3.1-8B-Instruct",
140+
"name": "Llama 3.1 8B Instruct",
141+
"cost_per_1m_in": 0.2,
142+
"cost_per_1m_out": 0.2,
143+
"cost_per_1m_in_cached": 0.07,
144+
"cost_per_1m_out_cached": 0.2,
145+
"context_window": 128000,
146+
"default_max_tokens": 4096,
147+
"can_reason": false,
148+
"supports_attachments": false
149+
},
150+
{
151+
"id": "hf:meta-llama/Llama-3.3-70B-Instruct",
152+
"name": "Llama 3.3 70B Instruct",
153+
"cost_per_1m_in": 0.9,
154+
"cost_per_1m_out": 0.9,
155+
"cost_per_1m_in_cached": 0.59,
156+
"cost_per_1m_out_cached": 1.1,
157+
"context_window": 128000,
158+
"default_max_tokens": 4096,
159+
"can_reason": false,
160+
"supports_attachments": false
161+
},
162+
{
163+
"id": "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
164+
"name": "Llama 4 Maverick 17B 128E Instruct FP8",
165+
"cost_per_1m_in": 0.22,
166+
"cost_per_1m_out": 0.88,
167+
"cost_per_1m_in_cached": 0.14,
168+
"cost_per_1m_out_cached": 0.55,
169+
"context_window": 524000,
170+
"default_max_tokens": 4096,
171+
"can_reason": false,
172+
"supports_attachments": true
173+
},
174+
{
175+
"id": "hf:meta-llama/Llama-4-Scout-17B-16E-Instruct",
176+
"name": "Llama 4 Scout 17B 16E Instruct",
177+
"cost_per_1m_in": 0.15,
178+
"cost_per_1m_out": 0.6,
179+
"cost_per_1m_in_cached": 0.14,
180+
"cost_per_1m_out_cached": 0.55,
181+
"context_window": 328000,
182+
"default_max_tokens": 8192,
183+
"can_reason": false,
184+
"supports_attachments": true
185+
},
186+
{
187+
"id": "hf:MiniMaxAI/MiniMax-M2",
188+
"name": "MiniMax M2",
189+
"cost_per_1m_in": 0.55,
190+
"cost_per_1m_out": 2.19,
191+
"cost_per_1m_in_cached": 0.27,
192+
"cost_per_1m_out_cached": 0.55,
193+
"context_window": 192000,
194+
"default_max_tokens": 65536,
195+
"can_reason": true,
196+
"reasoning_levels": [
197+
"low",
198+
"medium",
199+
"high"
200+
],
201+
"default_reasoning_effort": "medium",
202+
"supports_attachments": false
203+
},
204+
{
205+
"id": "hf:moonshotai/Kimi-K2-Instruct",
206+
"name": "Kimi K2 Instruct",
207+
"cost_per_1m_in": 0.6,
208+
"cost_per_1m_out": 2.5,
209+
"cost_per_1m_in_cached": 0.27,
210+
"cost_per_1m_out_cached": 0.55,
211+
"context_window": 128000,
212+
"default_max_tokens": 131072,
213+
"can_reason": false,
214+
"supports_attachments": false
215+
},
216+
{
217+
"id": "hf:moonshotai/Kimi-K2-Thinking",
218+
"name": "Kimi K2 Thinking",
219+
"cost_per_1m_in": 0.55,
220+
"cost_per_1m_out": 2.19,
221+
"cost_per_1m_in_cached": 0.55,
222+
"cost_per_1m_out_cached": 2.19,
223+
"context_window": 128000,
224+
"default_max_tokens": 131072,
225+
"can_reason": true,
226+
"reasoning_levels": [
227+
"low",
228+
"medium",
229+
"high"
230+
],
231+
"default_reasoning_effort": "medium",
232+
"supports_attachments": false
233+
},
234+
{
235+
"id": "hf:moonshotai/Kimi-K2-Instruct-0905",
236+
"name": "Kimi K2 Instruct 0905",
237+
"cost_per_1m_in": 1.2,
238+
"cost_per_1m_out": 1.2,
239+
"cost_per_1m_in_cached": 0.55,
240+
"cost_per_1m_out_cached": 1.1,
241+
"context_window": 256000,
243+
"default_max_tokens": 262144,
244+
"can_reason": false,
245+
"supports_attachments": false
246+
},
247+
{
248+
"id": "hf:openai/gpt-oss-120b",
249+
"name": "GPT-OSS 120B",
250+
"cost_per_1m_in": 0.1,
251+
"cost_per_1m_out": 0.1,
252+
"cost_per_1m_in_cached": 0.55,
253+
"cost_per_1m_out_cached": 1.1,
254+
"context_window": 128000,
255+
"default_max_tokens": 65536,
256+
"can_reason": true,
257+
"reasoning_levels": [
258+
"low",
259+
"medium",
260+
"high"
261+
],
262+
"default_reasoning_effort": "medium",
263+
"supports_attachments": false
264+
},
265+
{
266+
"id": "hf:Qwen/Qwen2.5-Coder-32B-Instruct",
267+
"name": "Qwen2.5 Coder 32B Instruct",
268+
"cost_per_1m_in": 0.14,
269+
"cost_per_1m_out": 0.55,
270+
"cost_per_1m_in_cached": 0.14,
271+
"cost_per_1m_out_cached": 0.55,
272+
"context_window": 32768,
273+
"default_max_tokens": 32768,
274+
"can_reason": true,
275+
"reasoning_levels": [
276+
"low",
277+
"medium",
278+
"high"
279+
],
280+
"default_reasoning_effort": "medium",
281+
"supports_attachments": false
282+
},
283+
{
284+
"id": "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
285+
"name": "Qwen3 235B A22B Instruct 2507",
286+
"cost_per_1m_in": 0.22,
287+
"cost_per_1m_out": 0.88,
288+
"cost_per_1m_in_cached": 0.55,
289+
"cost_per_1m_out_cached": 1.1,
290+
"context_window": 256000,
292+
"default_max_tokens": 6912,
293+
"can_reason": true,
294+
"reasoning_levels": [
295+
"low",
296+
"medium",
297+
"high"
298+
],
299+
"default_reasoning_effort": "medium",
300+
"supports_attachments": false
301+
},
302+
{
303+
"id": "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
304+
"name": "Qwen3 235B A22B Thinking 2507",
305+
"cost_per_1m_in": 0.65,
306+
"cost_per_1m_out": 3.0,
307+
"cost_per_1m_in_cached": 0.55,
308+
"cost_per_1m_out_cached": 1.1,
309+
"context_window": 256000,
310+
"default_max_tokens": 81920,
311+
"can_reason": true,
312+
"reasoning_levels": [
313+
"low",
314+
"medium",
315+
"high"
316+
],
317+
"default_reasoning_effort": "medium",
318+
"supports_attachments": false
319+
},
320+
{
321+
"id": "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
322+
"name": "Qwen3 Coder 480B A35B Instruct",
323+
"cost_per_1m_in": 0.45,
324+
"cost_per_1m_out": 1.8,
325+
"cost_per_1m_in_cached": 0.82,
326+
"cost_per_1m_out_cached": 1.65,
327+
"context_window": 256000,
328+
"default_max_tokens": 262144,
329+
"can_reason": true,
330+
"reasoning_levels": [
331+
"low",
332+
"medium",
333+
"high"
334+
],
335+
"default_reasoning_effort": "medium",
336+
"supports_attachments": false
337+
},
338+
{
339+
"id": "hf:zai-org/GLM-4.5",
340+
"name": "GLM-4.5",
341+
"cost_per_1m_in": 0.55,
342+
"cost_per_1m_out": 2.19,
343+
"cost_per_1m_in_cached": 0.14,
344+
"cost_per_1m_out_cached": 0.55,
345+
"context_window": 128000,
346+
"default_max_tokens": 98304,
347+
"can_reason": true,
348+
"reasoning_levels": [
349+
"low",
350+
"medium",
351+
"high"
352+
],
353+
"default_reasoning_effort": "medium",
354+
"supports_attachments": false
355+
},
356+
{
357+
"id": "hf:zai-org/GLM-4.6",
358+
"name": "GLM-4.6",
359+
"cost_per_1m_in": 0.55,
360+
"cost_per_1m_out": 0.55,
361+
"cost_per_1m_in_cached": 0.27,
362+
"cost_per_1m_out_cached": 0.55,
363+
"context_window": 198000,
364+
"default_max_tokens": 65536,
365+
"can_reason": true,
366+
"reasoning_levels": [
367+
"low",
368+
"medium",
369+
"high"
370+
],
371+
"default_reasoning_effort": "medium",
372+
"supports_attachments": false
373+
}
374+
]
375+
}

0 commit comments

Comments
 (0)