
Commit f9e9fc8

Merge branch 'master' of github.com:Mintplex-Labs/anything-llm into render
2 parents 0f145cf + 827c3f4

6 files changed: +54 -36 lines

.github/workflows/build-and-push-image-semver.yaml  (+1 -1)
@@ -102,7 +102,7 @@ jobs:
           CVE_EXCEPTIONS: ${{ steps.cve-list.outputs.CVE_EXCEPTIONS }}
         run: |
           echo $CVE_EXCEPTIONS
-          curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s -- v1.15.1
+          curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s --
           for cve in $CVE_EXCEPTIONS; do
             for tag in "${{ join(fromJSON(steps.meta.outputs.json).tags, ' ') }}"; do
               echo "Attaching VEX exception $cve to $tag"

.github/workflows/build-and-push-image.yaml  (+1 -1)
@@ -121,7 +121,7 @@ jobs:
           CVE_EXCEPTIONS: ${{ steps.cve-list.outputs.CVE_EXCEPTIONS }}
         run: |
           echo $CVE_EXCEPTIONS
-          curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s -- v1.15.1
+          curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s --
           for cve in $CVE_EXCEPTIONS; do
             for tag in "${{ join(fromJSON(steps.meta.outputs.json).tags, ' ') }}"; do
               echo "Attaching VEX exception $cve to $tag"

.github/workflows/dev-build.yaml  (+1 -1)
@@ -101,7 +101,7 @@ jobs:
           CVE_EXCEPTIONS: ${{ steps.cve-list.outputs.CVE_EXCEPTIONS }}
         run: |
           echo $CVE_EXCEPTIONS
-          curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s -- v1.15.1
+          curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s --
           for cve in $CVE_EXCEPTIONS; do
             for tag in "${{ join(fromJSON(steps.meta.outputs.json).tags, ' ') }}"; do
               echo "Attaching VEX exception $cve to $tag"

server/endpoints/api/workspace/index.js  (+1)
@@ -123,6 +123,7 @@ function apiWorkspaceEndpoints(app) {
           select: {
             user_id: true,
             slug: true,
+            name: true,
           },
         },
       },
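For reference, a minimal sketch of what this one-line change does to the nested select; the enclosing query and the relation being selected are outside this hunk, so the record shape below is purely illustrative:

    // Illustrative only -- the parent query and relation name are not visible in this hunk.
    // The nested select previously returned { user_id, slug }; it now also returns name.
    const exampleSelectedRecord = {
      user_id: 1,
      slug: "example-slug",
      name: "Example Name", // newly selected field
    };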

server/utils/AiProviders/apipie/index.js  (+35 -26)
@@ -1,8 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const {
-  handleDefaultStreamResponseV2,
-} = require("../../helpers/chat/responses");
-
 const { v4: uuidv4 } = require("uuid");
 const {
   writeResponseChunk,
@@ -98,6 +94,24 @@ class ApiPieLLM {
     );
   }

+  chatModels() {
+    const allModels = this.models();
+    return Object.entries(allModels).reduce(
+      (chatModels, [modelId, modelInfo]) => {
+        // Filter for chat models
+        if (
+          modelInfo.subtype &&
+          (modelInfo.subtype.includes("chat") ||
+            modelInfo.subtype.includes("chatx"))
+        ) {
+          chatModels[modelId] = modelInfo;
+        }
+        return chatModels;
+      },
+      {}
+    );
+  }
+
   streamingEnabled() {
     return "streamGetChatCompletion" in this;
   }
@@ -114,13 +128,13 @@ class ApiPieLLM {
   }

   promptWindowLimit() {
-    const availableModels = this.models();
+    const availableModels = this.chatModels();
     return availableModels[this.model]?.maxLength || 4096;
   }

   async isValidChatCompletionModel(model = "") {
     await this.#syncModels();
-    const availableModels = this.models();
+    const availableModels = this.chatModels();
     return availableModels.hasOwnProperty(model);
   }

@@ -189,22 +203,20 @@ class ApiPieLLM {
     return result.choices[0].message.content;
   }

-  // APIPie says it supports streaming, but it does not work across all models and providers.
-  // Notably, it is not working for OpenRouter models at all.
-  // async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
-  //   if (!(await this.isValidChatCompletionModel(this.model)))
-  //     throw new Error(
-  //       `ApiPie chat: ${this.model} is not valid for chat completion!`
-  //     );
-
-  //   const streamRequest = await this.openai.chat.completions.create({
-  //     model: this.model,
-  //     stream: true,
-  //     messages,
-  //     temperature,
-  //   });
-  //   return streamRequest;
-  // }
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `ApiPie chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+      temperature,
+    });
+    return streamRequest;
+  }

   handleStream(response, stream, responseProps) {
     const { uuid = uuidv4(), sources = [] } = responseProps;
@@ -264,10 +276,6 @@ class ApiPieLLM {
     });
   }

-  // handleStream(response, stream, responseProps) {
-  //   return handleDefaultStreamResponseV2(response, stream, responseProps);
-  // }
-
   // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
   async embedTextInput(textInput) {
     return await this.embedder.embedTextInput(textInput);
@@ -300,6 +308,7 @@ async function fetchApiPieModels(providedApiKey = null) {
       id: `${model.provider}/${model.model}`,
       name: `${model.provider}/${model.model}`,
       organization: model.provider,
+      subtype: model.subtype,
       maxLength: model.max_tokens,
     };
   });
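For context, a rough sketch of how the re-enabled streamGetChatCompletion is typically paired with handleStream; the caller, the express response object, and the helper name streamChat are assumptions for illustration, not code from this commit:

    // Hypothetical caller -- only streamGetChatCompletion and handleStream come from the diff above.
    async function streamChat(apipie, response, messages) {
      // Opens an OpenAI-compatible streaming completion against APIPie.
      const stream = await apipie.streamGetChatCompletion(messages, { temperature: 0.7 });
      // handleStream writes chunks to the HTTP response and resolves with the accumulated text.
      const completeText = await apipie.handleStream(response, stream, { sources: [] });
      return completeText;
    }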

server/utils/helpers/customModels.js  (+15 -7)
@@ -401,13 +401,21 @@ async function getAPIPieModels(apiKey = null) {
   if (!Object.keys(knownModels).length === 0)
     return { models: [], error: null };

-  const models = Object.values(knownModels).map((model) => {
-    return {
-      id: model.id,
-      organization: model.organization,
-      name: model.name,
-    };
-  });
+  const models = Object.values(knownModels)
+    .filter((model) => {
+      // Filter for chat models
+      return (
+        model.subtype &&
+        (model.subtype.includes("chat") || model.subtype.includes("chatx"))
+      );
+    })
+    .map((model) => {
+      return {
+        id: model.id,
+        organization: model.organization,
+        name: model.name,
+      };
+    });
   return { models, error: null };
 }

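This filter and the new chatModels() method in the APIPie provider apply the same subtype check. A standalone sketch of that predicate against made-up model data follows; the sample IDs and subtypes are illustrative, not taken from APIPie's catalog:

    // Same "chat"/"chatx" subtype predicate used in both places in this commit.
    // The model entries below are made up for illustration.
    const knownModels = {
      "openai/gpt-4o": { id: "openai/gpt-4o", subtype: "chatx", name: "openai/gpt-4o" },
      "openai/text-embedding-3-small": { id: "openai/text-embedding-3-small", subtype: "embedding" },
    };

    const chatOnly = Object.values(knownModels).filter(
      (model) =>
        model.subtype &&
        (model.subtype.includes("chat") || model.subtype.includes("chatx"))
    );

    console.log(chatOnly.map((m) => m.id)); // ["openai/gpt-4o"]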
