Skip to content

Commit 29d3445

Browse files
fix(backend): small fix for gemini model and headers for streaming (#26)
1 parent af95bcf commit 29d3445

File tree

2 files changed

+40
-11
lines changed

2 files changed

+40
-11
lines changed

packages/backend/src/config/provider/gemini.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -17,7 +17,7 @@ export const loadGeminiChatModels = async () => {
1717
'Gemini Flash 2.5': new ChatGoogleGenerativeAI({
1818
temperature: 0.7,
1919
apiKey: geminiApiKey,
20-
modelName: 'gemini-2.5-flash-preview-04-17',
20+
modelName: 'gemini-2.5-flash',
2121
}),
2222
};
2323

packages/backend/src/routes/chatCompletionHandler.ts

Lines changed: 39 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -132,6 +132,11 @@ export async function handleChatCompletion(
132132
let responseContent = '';
133133

134134
agent.on('data', (data: any) => {
135+
// Check if response is already finished
136+
if (res.destroyed || res.writableEnded) {
137+
return;
138+
}
139+
135140
const parsed = JSON.parse(data);
136141

137142
if (parsed.type === 'response') {
@@ -162,14 +167,24 @@ export async function handleChatCompletion(
162167

163168
agent.on('error', (error: any) => {
164169
console.error('Agent error:', error);
165-
res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
166-
res.end();
170+
if (!res.destroyed && !res.writableEnded) {
171+
res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
172+
res.end();
173+
}
167174
});
168175

169176
agent.on('end', () => {
177+
// Check if response is already finished
178+
if (res.destroyed || res.writableEnded) {
179+
return;
180+
}
181+
170182
const tokenUsage = TokenTracker.getSessionTokenUsage();
171183

172-
res.setHeader('x-total-tokens', tokenUsage.totalTokens.toString());
184+
// Check if headers haven't been sent yet
185+
if (!res.headersSent) {
186+
res.setHeader('x-total-tokens', tokenUsage.totalTokens.toString());
187+
}
173188

174189
const finalChunk = {
175190
id: uuidv4(),
@@ -210,16 +225,25 @@ export async function handleChatCompletion(
210225

211226
agent.on('error', (error: any) => {
212227
console.error('Agent error:', error);
213-
res.status(500).json({
214-
error: {
215-
message: error.message,
216-
type: 'server_error',
217-
code: 'internal_error',
218-
},
219-
});
228+
229+
// Check if headers haven't been sent yet
230+
if (!res.headersSent) {
231+
res.status(500).json({
232+
error: {
233+
message: error.message,
234+
type: 'server_error',
235+
code: 'internal_error',
236+
},
237+
});
238+
}
220239
});
221240

222241
agent.on('end', () => {
242+
// Check if headers haven't been sent yet
243+
if (res.headersSent) {
244+
return;
245+
}
246+
223247
const tokenUsage = TokenTracker.getSessionTokenUsage();
224248

225249
res.setHeader('x-total-tokens', tokenUsage.totalTokens.toString());
@@ -258,6 +282,11 @@ export async function handleChatCompletion(
258282
} catch (error) {
259283
console.error('Error in chat completion:', error);
260284

285+
// Check if headers haven't been sent yet
286+
if (res.headersSent) {
287+
return;
288+
}
289+
261290
// Map common errors to OpenAI error format
262291
if (error instanceof Error) {
263292
const errorResponse: any = {

0 commit comments

Comments (0)