Skip to content

Commit 2692ab4

Browse files
committed
Update README for new tool call flow
1 parent eafb4f7 commit 2692ab4

File tree

1 file changed

+82
-76
lines changed

1 file changed

+82
-76
lines changed

packages/ai/README.md

Lines changed: 82 additions & 76 deletions
Original file line number · Diff line number · Diff line change
@@ -31,21 +31,21 @@ const gpt4Rag = createRAGClient(client, {
3131
const astronomyRag = gpt4Rag.withContext({ query: "Astronomy" });
3232

3333
console.time("gpt-4 Time");
34-
console.log(await astronomyRag.queryRag({ prompt: "What color is the sky on Mars?" }));
34+
console.log((await astronomyRag.queryRag({ prompt: "What color is the sky on Mars?" })).content);
3535
console.timeEnd("gpt-4 Time");
3636

3737
const fastAstronomyRag = astronomyRag.withConfig({
3838
model: "gpt-4o",
3939
});
4040

4141
console.time("gpt-4o Time");
42-
console.log(await fastAstronomyRag.queryRag({ prompt: "What color is the sky on Mars?" }));
42+
console.log((await fastAstronomyRag.queryRag({ prompt: "What color is the sky on Mars?" })).content);
4343
console.timeEnd("gpt-4o Time");
4444

4545
const fastChemistryRag = fastAstronomyRag.withContext({ query: "Chemistry" });
4646

4747
console.log(
48-
await fastChemistryRag.queryRag({ prompt: "What is the atomic number of gold?" }),
48+
(await fastChemistryRag.queryRag({ prompt: "What is the atomic number of gold?" })).content,
4949
);
5050

5151
// handle the Response object
@@ -74,63 +74,60 @@ console.log(
7474

7575
The `@gel/ai` package supports tool calls, allowing you to extend the capabilities of the AI model with your own functions. Here's how to use them:
7676

77-
1. **Define the tool:** Create a `SystemMessage` that describes the tool, its parameters, and when it should be used.
78-
2. **Send the request:** Send a request to the model using `queryRag`, including the user's prompt and the tool definition.
79-
3. **Handle the tool call:** If the model decides to use the tool, it will return an `AssistantMessage` with a `tool_calls` array. Your code needs to:
77+
1. **Define your tools**: Create an array of `ToolDefinition` objects that describe your functions, their parameters, and what they do.
78+
2. **Send the request**: Call `queryRag` or `streamRag` with the user's prompt and the `tools` array. You can also use the `tool_choice` parameter to control how the model uses your tools.
79+
3. **Handle the tool call**: If the model decides to use a tool, it will return an `AssistantMessage` with a `tool_calls` array. Your code needs to:
8080
a. Parse the `tool_calls` array to identify the tool and its arguments.
81-
b. Execute the tool.
81+
b. Execute the tool and get the result.
8282
c. Create a `ToolMessage` with the result.
83-
d. Send the `ToolMessage` back to the model.
84-
4. **Receive the final response:** The model will use the tool's output to generate a final response.
83+
d. Send the `ToolMessage` back to the model in a new request.
84+
4. **Receive the final response**: The model will use the tool's output to generate a final response.
8585

8686
### Example
8787

8888
```typescript
8989
import type {
9090
Message,
91-
SystemMessage,
91+
ToolDefinition,
9292
UserMessage,
9393
ToolMessage,
9494
AssistantMessage,
9595
} from "@gel/ai";
9696

97-
// 1. Define the tool in a system message
98-
const systemMessage: SystemMessage = {
99-
role: "system",
100-
content: `
101-
You have access to a tool called "get_weather" that takes a city as a parameter.
102-
Use this tool to answer questions about the weather.
103-
The tool definition is:
104-
{
105-
"name": "get_weather",
106-
"description": "Get the current weather for a given city.",
107-
"parameters": {
108-
"type": "object",
109-
"properties": {
110-
"city": {
111-
"type": "string",
112-
"description": "The city to get the weather for."
113-
}
97+
// 1. Define your tools
98+
const tools: ToolDefinition[] = [
99+
{
100+
type: "function",
101+
name: "get_weather",
102+
description: "Get the current weather for a given city.",
103+
parameters: {
104+
type: "object",
105+
properties: {
106+
city: {
107+
type: "string",
108+
description: "The city to get the weather for.",
114109
},
115-
"required": ["city"]
116-
}
117-
}
118-
`,
119-
};
110+
},
111+
required: ["city"],
112+
},
113+
},
114+
];
120115

121116
// 2. Send the request
122117
const userMessage: UserMessage = {
123118
role: "user",
124119
content: [{ type: "text", text: "What's the weather like in London?" }],
125120
};
126121

127-
const messages: Message[] = [systemMessage, userMessage];
122+
const messages: Message[] = [userMessage];
128123

129124
const response = await ragClient.queryRag({
130125
messages,
126+
tools,
127+
tool_choice: "auto",
131128
});
132129

133-
// 3. Handle the tool call (this is a simplified example)
130+
// 3. Handle the tool call
134131
if (response.tool_calls) {
135132
const toolCall = response.tool_calls[0];
136133
if (toolCall.function.name === "get_weather") {
@@ -150,12 +147,13 @@ if (response.tool_calls) {
150147
// 4. Send the tool result back to the model
151148
const finalResponse = await ragClient.queryRag({
152149
messages,
150+
tools,
153151
});
154152

155-
console.log(finalResponse.text);
153+
console.log(finalResponse.content);
156154
}
157155
} else {
158-
console.log(response.text);
156+
console.log(response.content);
159157
}
160158

161159
// Dummy function for the example
@@ -170,64 +168,72 @@ When using `streamRag`, you can handle tool calls as they arrive in the stream.
170168

171169
```typescript
172170
// Function to handle the streaming response
173-
async function handleStreamingResponse() {
171+
async function handleStreamingResponse(initialMessages: Message[]) {
174172
const stream = ragClient.streamRag({
175-
messages,
173+
messages: initialMessages,
174+
tools,
175+
tool_choice: "auto",
176176
});
177177

178-
let toolCallId: string | null = null;
179-
let functionName: string | null = null;
180-
let functionArguments = "";
181-
let assistantResponse: AssistantMessage | null = null;
178+
let toolCalls: { id: string; name: string; arguments: string }[] = [];
179+
let currentToolCall: { id: string; name: string; arguments: string } | null =
180+
null;
182181

183182
for await (const chunk of stream) {
184183
if (
185184
chunk.type === "content_block_start" &&
186185
chunk.content_block.type === "tool_use"
187186
) {
188-
toolCallId = chunk.content_block.id;
189-
functionName = chunk.content_block.name;
187+
currentToolCall = {
188+
id: chunk.content_block.id!,
189+
name: chunk.content_block.name,
190+
arguments: "",
191+
};
190192
} else if (
191193
chunk.type === "content_block_delta" &&
192-
chunk.delta.type === "tool_call_delta"
194+
chunk.delta.type === "input_json_delta"
193195
) {
194-
functionArguments += chunk.delta.args;
196+
if (currentToolCall) {
197+
currentToolCall.arguments += chunk.delta.partial_json;
198+
}
199+
} else if (chunk.type === "content_block_stop") {
200+
if (currentToolCall) {
201+
toolCalls.push(currentToolCall);
202+
currentToolCall = null;
203+
}
195204
} else if (chunk.type === "message_stop") {
196205
// The model has finished its turn
197-
if (functionName && toolCallId) {
198-
// We have a tool call to execute
199-
const args = JSON.parse(functionArguments);
200-
const weather = await getWeather(args.city); // Your function to get the weather
201-
202-
const toolMessage: ToolMessage = {
203-
role: "tool",
204-
tool_call_id: toolCallId,
205-
content: JSON.stringify({ weather }),
206-
};
207-
208-
// Add the assistant's response and the tool message to the history
209-
// A complete assistant message would be constructed from the stream
210-
assistantResponse = {
206+
if (toolCalls.length > 0) {
207+
const assistantMessage: AssistantMessage = {
211208
role: "assistant",
212-
content: "",
213-
tool_calls: [
214-
{
215-
id: toolCallId,
216-
type: "function",
217-
function: { name: functionName, arguments: functionArguments },
218-
},
219-
],
209+
content: null,
210+
tool_calls: toolCalls.map((tc) => ({
211+
id: tc.id,
212+
type: "function",
213+
function: { name: tc.name, arguments: tc.arguments },
214+
})),
220215
};
221-
messages.push(assistantResponse);
222-
messages.push(toolMessage);
223216

224-
// Reset for the next turn
225-
toolCallId = null;
226-
functionName = null;
227-
functionArguments = "";
217+
const toolMessages: ToolMessage[] = await Promise.all(
218+
toolCalls.map(async (tc) => {
219+
const args = JSON.parse(tc.arguments);
220+
const weather = await getWeather(args.city); // Your function to get the weather
221+
return {
222+
role: "tool",
223+
tool_call_id: tc.id,
224+
content: JSON.stringify({ weather }),
225+
};
226+
}),
227+
);
228+
229+
const newMessages: Message[] = [
230+
...initialMessages,
231+
assistantMessage,
232+
...toolMessages,
233+
];
228234

229235
// Call the function again to get the final response
230-
await handleStreamingResponse();
236+
await handleStreamingResponse(newMessages);
231237
}
232238
} else if (
233239
chunk.type === "content_block_delta" &&
@@ -239,5 +245,5 @@ async function handleStreamingResponse() {
239245
}
240246
}
241247

242-
handleStreamingResponse();
248+
handleStreamingResponse(messages);
243249
```

0 commit comments

Comments (0)