16 changes: 16 additions & 0 deletions chatapi/README.md
@@ -8,13 +8,29 @@ For model choices view:
- Deepseek: https://api-docs.deepseek.com/quick_start/pricing
- Gemini: https://deepmind.google/technologies/gemini/

### Assistants to Responses migration checklist

Use `rg "beta.assistants"` to confirm the deprecated helpers have been removed. The migration to the Responses API touched:

1. Backend utilities (`src/utils/chat-assistant.utils.ts`, `src/utils/chat-helpers.utils.ts`), where assistant threads were replaced with `client.responses.create` and streaming wrappers (see the sketch after this list).
2. Express/websocket wiring (`src/index.ts`) so every event now carries Responses metadata alongside the textual delta.
3. Angular consumers (`src/app/shared/chat.service.ts`, `src/app/chat/**`) which now normalise structured stream payloads while remaining backward compatible with plain strings.
4. Automated coverage (`chatapi/src/utils/chat-assistant.utils.spec.ts`, `src/app/chat/chat-window/chat-window.component.spec.ts`) to lock the new contract in place.
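
A minimal sketch of the new call shape, assuming the official `openai` SDK client; the model name is illustrative, and the real helpers build structured `input` via `buildAssistantResponseParams` rather than the plain string used here:

```ts
import OpenAI from 'openai';

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function demo() {
  // One-shot: the SDK aggregates the reply into `output_text`.
  const response = await client.responses.create({
    model: 'gpt-4o-mini',
    instructions: 'Be concise.',
    input: 'Hello there',
  });
  console.log(response.output_text);

  // Streaming: forward each text delta as it arrives.
  const stream = client.responses.stream({
    model: 'gpt-4o-mini',
    input: 'Hello there',
  });
  for await (const event of stream) {
    if (event.type === 'response.output_text.delta') {
      process.stdout.write(event.delta);
    }
  }
}
```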

## Development Notes
Run `cd chatapi` and add a `.env` file in the `chatapi` directory with the following configs (ensure the username & password match your admin credentials):
```
SERVE_PORT=5000
COUCHDB_HOST=http://localhost:2200
COUCHDB_USER=planet
COUCHDB_PASS=planet
# Optional assistant overrides for the Responses API
OPENAI_ASSISTANT_NAME="OLE Assistant"
OPENAI_ASSISTANT_INSTRUCTIONS="Keep answers short"
OPENAI_RESPONSE_FORMAT=text
OPENAI_PARALLEL_TOOL_CALLS=false
# JSON array describing default tool configuration
OPENAI_ASSISTANT_TOOLS='[{"type":"code_interpreter"}]'
```
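
`OPENAI_RESPONSE_FORMAT` also accepts a JSON object: `parseResponseFormat` in `ai-providers.config.ts` treats any value starting with `{` as JSON. A hypothetical structured override:

```
OPENAI_RESPONSE_FORMAT='{"type":"json_object"}'
```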

By default (Linux), the chatapi uses 5000 as the serve port. For *Windows* and *macOS* users we recommend using `5400` as the serve port to avoid conflicts with other services.
11 changes: 11 additions & 0 deletions chatapi/jest.config.ts
@@ -0,0 +1,11 @@
import type { Config } from 'jest';

const config: Config = {
preset: 'ts-jest',
testEnvironment: 'node',
roots: [ '<rootDir>/src' ],
moduleFileExtensions: [ 'ts', 'js', 'json' ],
collectCoverageFrom: [ 'src/**/*.ts', '!src/index.ts' ],
};

export default config;
8 changes: 6 additions & 2 deletions chatapi/package.json
@@ -8,7 +8,8 @@
"build": "tsc",
"dev": "nodemon --exec ts-node src/index.ts",
"lint": "eslint . --ext .ts",
"lint-fix": "eslint . --ext .ts --fix"
"lint-fix": "eslint . --ext .ts --fix",
"test": "jest"
},
"repository": {
"type": "git",
@@ -43,10 +44,13 @@
"ws": "^8.14.2"
},
"devDependencies": {
"@types/jest": "^29.5.12",
"@types/node": "^20.3.1",
"@typescript-eslint/eslint-plugin": "^5.60.0",
"@typescript-eslint/parser": "^5.60.0",
"eslint": "^8.43.0",
"nodemon": "^2.0.22"
"jest": "^29.7.0",
"nodemon": "^2.0.22",
"ts-jest": "^29.2.5"
}
}
76 changes: 73 additions & 3 deletions chatapi/src/config/ai-providers.config.ts
@@ -2,12 +2,61 @@
import OpenAI from 'openai';

import { configurationDB } from './nano.config';
import { ModelsDocument } from '../models/chat.model';
import { AssistantResponseFormat, AssistantToolConfig, ModelsDocument } from '../models/chat.model';

let keys: Record<string, any> = {};
let models: Record<string, any> = {};
let assistant: Record<string, any> = {};

const parseBoolean = (value: string | undefined): boolean | undefined => {
if (value === undefined) {
return undefined;
}

const normalized = value.trim().toLowerCase();
if ([ 'true', '1', 'yes', 'y' ].includes(normalized)) {
return true;
}
if ([ 'false', '0', 'no', 'n' ].includes(normalized)) {
return false;
}
return undefined;
};

const parseJSON = <T>(value: string | undefined): T | undefined => {
if (!value) {
return undefined;
}
try {
return JSON.parse(value) as T;
} catch (error) {
console.error(`Failed to parse JSON value from environment: ${error}`); // eslint-disable-line no-console
return undefined;
}
};

const parseResponseFormat = (value: string | undefined): AssistantResponseFormat | undefined => {
if (!value) {
return undefined;
}

if (value.trim().startsWith('{')) {
return parseJSON<AssistantResponseFormat>(value);
}

return value;
};

const parseTools = (value: string | undefined): AssistantToolConfig[] | undefined => {
const parsed = parseJSON<AssistantToolConfig[]>(value);

if (!parsed) {
return undefined;
}

return Array.isArray(parsed) ? parsed : undefined;
};

async function getConfig(): Promise<ModelsDocument | undefined> {
try {
const allDocs = await configurationDB.list({ 'include_docs': true });
@@ -54,10 +103,31 @@ const initialize = async () => {
'gemini': { 'ai': keys.gemini, 'defaultModel': doc?.models.gemini || '' },
};

const envAssistantName = process.env.OPENAI_ASSISTANT_NAME;
const envAssistantInstructions = process.env.OPENAI_ASSISTANT_INSTRUCTIONS;
const envResponseFormat = parseResponseFormat(process.env.OPENAI_RESPONSE_FORMAT);
const envParallelToolCalls = parseBoolean(process.env.OPENAI_PARALLEL_TOOL_CALLS);
const envTools = parseTools(process.env.OPENAI_ASSISTANT_TOOLS);

const computedTools = Array.isArray(doc?.assistant?.tools)
? doc?.assistant?.tools
: envTools ?? [ { 'type': 'code_interpreter' } ];

assistant = {
'name': doc?.assistant?.name || '',
'instructions': doc?.assistant?.instructions || '',
'name': doc?.assistant?.name || envAssistantName || '',
'instructions': doc?.assistant?.instructions || envAssistantInstructions || '',
'tools': computedTools,
};

const resolvedResponseFormat = doc?.assistant?.response_format ?? envResponseFormat;
if (resolvedResponseFormat !== undefined) {
assistant.response_format = resolvedResponseFormat;
}

const resolvedParallelToolCalls = doc?.assistant?.parallel_tool_calls ?? envParallelToolCalls;
if (resolvedParallelToolCalls !== undefined) {
assistant.parallel_tool_calls = resolvedParallelToolCalls;
}
} catch (error) {
console.error(`Error initializing configs: ${error}`);
}
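
Reviewer note on the precedence implemented above: CouchDB configuration wins over environment overrides, and tools fall back in two steps. A worked trace under assumed inputs:

```ts
// Assume the CouchDB doc has no assistant.tools and the env sets
// OPENAI_ASSISTANT_TOOLS='[{"type":"file_search"}]':
//   doc?.assistant?.tools -> undefined (not an array)
//   envTools              -> [{ type: 'file_search' }]
//   computedTools         -> [{ type: 'file_search' }]
// With neither source set, computedTools defaults to [{ type: 'code_interpreter' }].
```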
29 changes: 24 additions & 5 deletions chatapi/src/index.ts
@@ -5,7 +5,7 @@ import http from 'http';
import WebSocket from 'ws';

import { chat, chatNoSave } from './services/chat.service';
import { keys } from './config/ai-providers.config';
import { assistant, keys } from './config/ai-providers.config';

dotenv.config();

@@ -37,14 +37,25 @@ wss.on('connection', (ws) => {
}

const chatResponse = await chat(data, true, (response) => {
ws.send(JSON.stringify({ 'type': 'partial', response }));
ws.send(JSON.stringify({
'type': 'partial',
'response': response,
'metadata': {
'source': 'responses',
'response_format': assistant?.response_format ?? 'text',
}
}));
});

if (chatResponse) {
ws.send(JSON.stringify({
'type': 'final',
'completionText': chatResponse.completionText,
'couchDBResponse': chatResponse.couchSaveResponse
'couchDBResponse': chatResponse.couchSaveResponse,
'metadata': {
'source': 'responses',
'response_format': assistant?.response_format ?? 'text',
}
}));
}
} catch (error: any) {
@@ -69,14 +80,22 @@ app.post('/', async (req: any, res: any) => {
const response = await chatNoSave(data.content, data.aiProvider, data.context, data.assistant, false);
return res.status(200).json({
'status': 'Success',
'chat': response
'chat': response,
'metadata': {
'source': 'responses',
'response_format': assistant?.response_format ?? 'text',
}
});
} else {
const response = await chat(data, false);
return res.status(201).json({
'status': 'Success',
'chat': response?.completionText,
'couchDBResponse': response?.couchSaveResponse
'couchDBResponse': response?.couchSaveResponse,
'metadata': {
'source': 'responses',
'response_format': assistant?.response_format ?? 'text',
}
});
}
} catch (error: any) {
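
Downstream, a client can branch on the new metadata while still tolerating plain-string frames; a sketch against the websocket contract above (the `render` sink is hypothetical — the real consumer is the Angular chat service):

```ts
// Hypothetical sink for streamed text; the real app updates Angular state instead.
declare function render(text: string, format: string): void;

const socket = new WebSocket('ws://localhost:5000');

socket.addEventListener('message', (event) => {
  const frame = JSON.parse(event.data as string);
  const format = frame.metadata?.response_format ?? 'text';
  if (frame.type === 'partial') {
    render(frame.response, format);
  } else if (frame.type === 'final') {
    render(frame.completionText, format);
  }
});
```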
13 changes: 13 additions & 0 deletions chatapi/src/models/chat.model.ts
@@ -12,9 +12,22 @@ interface Providers {
gemini?: string;
}

export type AssistantResponseFormat = string | {
type: string;
[key: string]: any;
};

export interface AssistantToolConfig {
type: string;
[key: string]: any;
}

interface Assistant {
name: string;
instructions: string;
response_format?: AssistantResponseFormat;
parallel_tool_calls?: boolean;
tools?: AssistantToolConfig[];
}

export interface ModelsDocument {
12 changes: 10 additions & 2 deletions chatapi/src/services/chat.service.ts
@@ -68,15 +68,23 @@ export async function chat(data: any, stream?: boolean, callback?: (response: st
export async function chatNoSave(
content: any,
aiProvider: AIProvider,
assistant: boolean,
context?: any,
assistantOrContext: boolean | any,
contextOrAssistant?: any,
stream?: boolean,
callback?: (response: string) => void
): Promise<string | undefined> {
const messages: ChatMessage[] = [];

messages.push({ 'role': 'user', content });

const assistant = typeof assistantOrContext === 'boolean'
? assistantOrContext
: typeof contextOrAssistant === 'boolean'
? contextOrAssistant
: false;

const context = typeof assistantOrContext === 'boolean' ? contextOrAssistant : assistantOrContext;

try {
const completionText = await aiChat(messages, aiProvider, assistant, context, stream, callback);
messages.push({
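
The boolean-sniffing shim above lets both argument orders resolve identically; a sketch of the two accepted call shapes (provider value and context payload are illustrative):

```ts
import { chatNoSave } from './services/chat.service';

// Both calls resolve to assistant=true, context={ page: 'intro' }.
async function demoOverloads(provider: any) {
  // New order, as used by index.ts: content, provider, context, assistant flag
  await chatNoSave('Summarize this page', provider, { page: 'intro' }, true);
  // Legacy order, still supported: content, provider, assistant flag, context
  await chatNoSave('Summarize this page', provider, true, { page: 'intro' });
}
```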
104 changes: 104 additions & 0 deletions chatapi/src/utils/chat-assistant.utils.spec.ts
@@ -0,0 +1,104 @@
import { buildAssistantResponseParams, createAssistantResponse, createAssistantResponseStream } from './chat-assistant.utils';
import { ChatMessage } from '../models/chat.model';

const mockCreate = jest.fn();
const mockStream = jest.fn();
const mockDone = jest.fn().mockResolvedValue(undefined);

class MockResponseStream {
private events: any[];
finalResponse: jest.Mock;
done = mockDone;

constructor(events: any[], finalResponse: any) {
this.events = events;
this.finalResponse = jest.fn().mockResolvedValue(finalResponse);
}

async *[Symbol.asyncIterator]() {
for (const event of this.events) {
yield event;
}
}
}

jest.mock('../config/ai-providers.config', () => ({
assistant: {
instructions: 'Be concise.',
tools: [ { type: 'code_interpreter' } ],
response_format: 'text',
parallel_tool_calls: true,
},
keys: {
openai: {
responses: {
create: (...args: unknown[]) => mockCreate(...args),
stream: (...args: unknown[]) => mockStream(...args),
},
},
},
}));

describe('chat-assistant utils', () => {
beforeEach(() => {
mockCreate.mockReset();
mockStream.mockReset();
mockDone.mockClear();
});

it('buildAssistantResponseParams merges instructions and context', () => {
const messages: ChatMessage[] = [ { role: 'user', content: 'Hello there' } ];
const params = buildAssistantResponseParams(messages, 'gpt-test', 'Context info');

expect(params.model).toBe('gpt-test');
expect(params.instructions).toContain('Be concise.');
expect(params.instructions).toContain('Context info');
expect(params.tools).toEqual([ { type: 'code_interpreter' } ]);
expect(params.response_format).toBe('text');
expect(params.parallel_tool_calls).toBe(true);
expect(params.input).toEqual([
{
role: 'user',
content: [ { type: 'text', text: 'Hello there' } ]
}
]);
});

it('createAssistantResponse returns aggregated text from the API response', async () => {
mockCreate.mockResolvedValue({
output: [
{
type: 'output_text',
text: 'Aggregated reply'
}
]
});

const messages: ChatMessage[] = [ { role: 'user', content: 'Ping?' } ];
const params = buildAssistantResponseParams(messages, 'gpt-test');
const result = await createAssistantResponse(params);

expect(result).toBe('Aggregated reply');
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({ model: 'gpt-test' }));
});

it('createAssistantResponseStream collects deltas and final response', async () => {
const events = [
{ type: 'response.output_text.delta', delta: 'partial ' },
{ type: 'response.output_text.delta', delta: 'message' },
{ type: 'response.completed', response: { output_text: 'partial message!' } }
];
const streamInstance = new MockResponseStream(events, { output_text: 'partial message!' });
mockStream.mockResolvedValue(streamInstance);

const callback = jest.fn();
const messages: ChatMessage[] = [ { role: 'user', content: 'Summarize.' } ];
const params = buildAssistantResponseParams(messages, 'gpt-test');
const result = await createAssistantResponseStream(params, callback);

expect(result).toBe('partial message!');
expect(callback).toHaveBeenCalledWith('partial ');
expect(callback).toHaveBeenCalledWith('message');
expect(mockStream).toHaveBeenCalled();
});
});