@@ -11,13 +11,6 @@ const ImportedPlugin = require("./imported");
11
11
class AgentHandler {
12
12
#invocationUUID;
13
13
#funcsToLoad = [ ] ;
14
- noProviderModelDefault = {
15
- azure : "OPEN_MODEL_PREF" ,
16
- lmstudio : "LMSTUDIO_MODEL_PREF" ,
17
- textgenwebui : null , // does not even use `model` in API req
18
- "generic-openai" : "GENERIC_OPEN_AI_MODEL_PREF" ,
19
- bedrock : "AWS_BEDROCK_LLM_MODEL_PREFERENCE" ,
20
- } ;
21
14
invocation = null ;
22
15
aibitat = null ;
23
16
channel = null ;
@@ -184,53 +177,70 @@ class AgentHandler {
184
177
}
185
178
}
186
179
180
+ /**
181
+ * Finds the default model for a given provider. If no default model is set for it's associated ENV then
182
+ * it will return a reasonable base model for the provider if one exists.
183
+ * @param {string } provider - The provider to find the default model for.
184
+ * @returns {string|null } The default model for the provider.
185
+ */
187
186
providerDefault ( provider = this . provider ) {
188
187
switch ( provider ) {
189
188
case "openai" :
190
- return "gpt-4o" ;
189
+ return process . env . OPEN_MODEL_PREF ?? "gpt-4o" ;
191
190
case "anthropic" :
192
- return "claude-3-sonnet-20240229" ;
191
+ return process . env . ANTHROPIC_MODEL_PREF ?? "claude-3-sonnet-20240229" ;
193
192
case "lmstudio" :
194
- return "server-default" ;
193
+ return process . env . LMSTUDIO_MODEL_PREF ?? "server-default" ;
195
194
case "ollama" :
196
- return "llama3:latest" ;
195
+ return process . env . OLLAMA_MODEL_PREF ?? "llama3:latest" ;
197
196
case "groq" :
198
- return "llama3-70b-8192" ;
197
+ return process . env . GROQ_MODEL_PREF ?? "llama3-70b-8192" ;
199
198
case "togetherai" :
200
- return "mistralai/Mixtral-8x7B-Instruct-v0.1" ;
199
+ return (
200
+ process . env . TOGETHER_AI_MODEL_PREF ??
201
+ "mistralai/Mixtral-8x7B-Instruct-v0.1"
202
+ ) ;
201
203
case "azure" :
202
- return "gpt-3.5-turbo" ;
203
- case "koboldcpp" :
204
204
return null ;
205
+ case "koboldcpp" :
206
+ return process . env . KOBOLD_CPP_MODEL_PREF ?? null ;
205
207
case "gemini" :
206
- return "gemini-pro" ;
208
+ return process . env . GEMINI_MODEL_PREF ?? "gemini-pro" ;
207
209
case "localai" :
208
- return null ;
210
+ return process . env . LOCAL_AI_MODEL_PREF ?? null ;
209
211
case "openrouter" :
210
- return "openrouter/auto" ;
212
+ return process . env . OPENROUTER_MODEL_PREF ?? "openrouter/auto" ;
211
213
case "mistral" :
212
- return "mistral-medium" ;
214
+ return process . env . MISTRAL_MODEL_PREF ?? "mistral-medium" ;
213
215
case "generic-openai" :
214
- return null ;
216
+ return process . env . GENERIC_OPEN_AI_MODEL_PREF ?? null ;
215
217
case "perplexity" :
216
- return "sonar-small-online" ;
218
+ return process . env . PERPLEXITY_MODEL_PREF ?? "sonar-small-online" ;
217
219
case "textgenwebui" :
218
220
return null ;
219
221
case "bedrock" :
220
- return null ;
222
+ return process . env . AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null ;
221
223
case "fireworksai" :
222
- return null ;
224
+ return process . env . FIREWORKS_AI_LLM_MODEL_PREF ?? null ;
223
225
case "deepseek" :
224
- return "deepseek-chat" ;
226
+ return process . env . DEEPSEEK_MODEL_PREF ?? "deepseek-chat" ;
225
227
case "litellm" :
226
- return null ;
228
+ return process . env . LITE_LLM_MODEL_PREF ?? null ;
227
229
case "apipie" :
228
- return null ;
230
+ return process . env . APIPIE_LLM_MODEL_PREF ?? null ;
229
231
default :
230
- return "unknown" ;
232
+ return null ;
231
233
}
232
234
}
233
235
236
+ /**
237
+ * Attempts to find a fallback provider and model to use if the workspace
238
+ * does not have an explicit `agentProvider` and `agentModel` set.
239
+ * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
240
+ * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
241
+ * 3. Otherwise, return null - will likely throw an error the user can act on.
242
+ * @returns {object|null } - An object with provider and model keys.
243
+ */
234
244
#getFallbackProvider( ) {
235
245
// First, fallback to the workspace chat provider and model if they exist
236
246
if (
@@ -262,7 +272,7 @@ class AgentHandler {
262
272
* If multi-model loading is supported, we use their agent model selection of the workspace
263
273
* If not supported, we attempt to fallback to the system provider value for the LLM preference
264
274
* and if that fails - we assume a reasonable base model to exist.
265
- * @returns {string } the model preference value to use in API calls
275
+ * @returns {string|null } the model preference value to use in API calls
266
276
*/
267
277
#fetchModel( ) {
268
278
// Provider was not explicitly set for workspace, so we are going to run our fallback logic
@@ -275,21 +285,11 @@ class AgentHandler {
275
285
}
276
286
277
287
// The provider was explicitly set, so check if the workspace has an agent model set.
278
- if ( this . invocation . workspace . agentModel ) {
288
+ if ( this . invocation . workspace . agentModel )
279
289
return this . invocation . workspace . agentModel ;
280
- }
281
290
282
- // If the provider we are using is not supported or does not support multi-model loading
283
- // then we use the default model for the provider.
284
- if ( ! Object . keys ( this . noProviderModelDefault ) . includes ( this . provider ) ) {
285
- return this . providerDefault ( ) ;
286
- }
287
-
288
- // Load the model from the system environment variable for providers with no multi-model loading.
289
- const sysModelKey = this . noProviderModelDefault [ this . provider ] ;
290
- if ( sysModelKey ) return process . env [ sysModelKey ] ?? this . providerDefault ( ) ;
291
-
292
- // Otherwise, we have no model to use - so guess a default model to use.
291
+ // Otherwise, we have no model to use - so guess a default model to use via the provider
292
+ // and its system ENV params and if that fails - we return either a base model or null.
293
293
return this . providerDefault ( ) ;
294
294
}
295
295
@@ -299,7 +299,6 @@ class AgentHandler {
299
299
300
300
if ( ! this . provider )
301
301
throw new Error ( "No valid provider found for the agent." ) ;
302
-
303
302
this . log ( `Start ${ this . #invocationUUID} ::${ this . provider } :${ this . model } ` ) ;
304
303
this . checkSetup ( ) ;
305
304
}
0 commit comments