langgraph-chat-transport 1.0.3
Install from the command line:
$ npm install @keboola/langgraph-chat-transport@1.0.3
Install via package.json:
"@keboola/langgraph-chat-transport": "1.0.3"
About this version
A transport adapter that bridges LangGraph API streaming events to Vercel AI SDK's useChat hook format.
LangGraph's streaming API uses Server-Sent Events (SSE) with a different message format than the one Vercel AI SDK's useChat hook expects. This adapter transforms LangGraph's message format into the chunk protocol that useChat understands, enabling seamless integration between LangGraph agents and React chat interfaces.
Without this adapter: you'd need to manually handle LangGraph's SSE streams and convert them to the format expected by useChat.
With this adapter: just plug it in as a transport and everything works seamlessly!
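For a sense of what that manual handling involves, here is a rough sketch (illustrative only; the endpoint URL and the payload handling are placeholders, not the adapter's internals):
// Hand-rolled SSE handling — roughly what the adapter does for you.
// The endpoint URL and payload handling below are placeholders.
async function streamManually(messages: unknown[]) {
  const response = await fetch('/api/langgraph/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  });
  const reader = response.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // SSE events are separated by blank lines; keep any trailing partial event
    const events = buffer.split('\n\n');
    buffer = events.pop() ?? '';
    for (const event of events) {
      for (const line of event.split('\n')) {
        if (line.startsWith('data:')) {
          const payload = JSON.parse(line.slice(5).trim());
          // ...then convert each LangGraph payload into useChat chunks yourself
          console.log(payload);
        }
      }
    }
  }
}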
# Add to .npmrc
echo "@keboola:registry=https://npm.pkg.github.com" >> .npmrc
# Install (requires GitHub authentication)
npm install @keboola/langgraph-chat-transport ai@beta
Note: This package requires Vercel AI SDK v5 Beta, which includes the HttpChatTransport system.
import { useChat } from '@ai-sdk/react';
import { LangGraphChatTransport } from '@keboola/langgraph-chat-transport';
const transport = new LangGraphChatTransport({
api: '/api/langgraph/stream', // Your API endpoint
});
export function ChatComponent() {
const { messages, input, handleInputChange, handleSubmit } = useChat({
transport,
});
return (
<div>
{messages.map((message) => (
<div key={message.id}>
<strong>{message.role}:</strong> {message.content}
</div>
))}
<form onSubmit={handleSubmit}>
<input value={input} onChange={handleInputChange} />
<button type="submit">Send</button>
</form>
</div>
);
}
// app/api/langgraph/stream/route.ts
export async function POST(request: Request) {
const { messages } = await request.json();
// Forward to your LangGraph API
const response = await fetch('https://your-langgraph-api.com/stream', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ messages }),
});
// Return the SSE stream directly
return new Response(response.body, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
},
});
}
import { useChat } from '@ai-sdk/react';
import { LangGraphChatTransport } from '@keboola/langgraph-chat-transport';
const transport = new LangGraphChatTransport({
api: '/api/langgraph/stream',
headers: {
'Authorization': 'Bearer YOUR_API_KEY',
},
});
export function AdvancedChatComponent() {
const { messages, input, handleInputChange, handleSubmit, isLoading } = useChat({
transport,
onToolCall: ({ toolCall }) => {
console.log('Tool called:', toolCall.toolName, toolCall.args);
},
});
return (
<div>
{messages.map((message) => (
<div key={message.id}>
<strong>{message.role}:</strong>
{message.content}
{/* Display tool invocations */}
{message.toolInvocations?.map((tool) => (
<div key={tool.toolCallId} className="tool-call">
<strong>🔧 {tool.toolName}</strong>
<pre>{JSON.stringify(tool.args, null, 2)}</pre>
{tool.result && (
<div className="tool-result">
Result: {JSON.stringify(tool.result, null, 2)}
</div>
)}
</div>
))}
</div>
))}
<form onSubmit={handleSubmit}>
<input
value={input}
onChange={handleInputChange}
disabled={isLoading}
placeholder="Ask me anything..."
/>
<button type="submit" disabled={isLoading}>
{isLoading ? 'Sending...' : 'Send'}
</button>
</form>
</div>
);
}
import { useChat } from '@ai-sdk/react';
import { LangGraphChatTransport } from '@keboola/langgraph-chat-transport';
import type { LangGraphMessage } from '@keboola/langgraph-chat-transport';
import type { UIMessage } from 'ai';
// Custom message type extending UIMessage
interface CustomMessage extends UIMessage {
metadata?: {
model?: string;
tokens?: number;
};
}
const transport = new LangGraphChatTransport<CustomMessage>({
api: '/api/chat',
body: {
model: 'gpt-4',
sessionId: 'user-123',
},
});
export function TypedChat() {
const { messages } = useChat<CustomMessage>({ transport });
return (
<div>
{messages.map((message) => (
<div key={message.id}>
<div>
{message.parts?.map((part) =>
part.type === 'text' ? part.text : null
)}
</div>
{message.metadata?.tokens && (
<span>Tokens: {message.metadata.tokens}</span>
)}
</div>
))}
</div>
);
}
The LangGraphChatTransport accepts all HttpChatTransportInitOptions:
interface HttpChatTransportInitOptions {
api?: string; // API endpoint (default: '/api/chat')
headers?: Record<string, string> | Headers; // HTTP headers
body?: object; // Additional request body
credentials?: RequestCredentials; // Fetch credentials mode
fetch?: FetchFunction; // Custom fetch implementation
}
// Basic configuration
const transport = new LangGraphChatTransport({
api: 'https://api.example.com/langgraph/stream',
});
// With authentication
const transport = new LangGraphChatTransport({
api: '/api/chat',
headers: {
'Authorization': 'Bearer ' + process.env.API_KEY,
'X-User-ID': 'user-123',
},
});
// With additional request data
const transport = new LangGraphChatTransport({
api: '/api/chat',
body: {
model: 'gpt-4',
temperature: 0.7,
sessionId: crypto.randomUUID(),
},
});
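The fetch option accepts any fetch-compatible function, which is handy for wrapping the default implementation. A sketch (the logging here is illustrative, not part of the package):
// With a custom fetch implementation (e.g. for request logging or retries)
const transport = new LangGraphChatTransport({
  api: '/api/chat',
  fetch: async (input, init) => {
    console.log('LangGraph request:', input); // illustrative logging
    const response = await fetch(input, init);
    console.log('LangGraph response status:', response.status);
    return response;
  },
});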
- ✅ Message Types: ai, human, system, tool
- ✅ Text Streaming: Incremental text updates with proper delta handling
- ✅ Tool Calls: Full tool invocation and result streaming
- ✅ Error Handling: Graceful error propagation to UI
- ✅ TypeScript: Full type safety with generics
- ✅ SSE Parsing: Robust Server-Sent Events parsing
- ✅ State Management: Proper text block and tool call tracking
The adapter converts LangGraph messages like this:
{
"type": "ai",
"content": [
{
"type": "text",
"text": "Hello, I can help you with that..."
},
{
"type": "tool_use",
"id": "tool_1",
"name": "search",
"input": { "query": "example" }
}
]
}
...into the chunk protocol that useChat understands:
[
{ "type": "text-start", "id": "text-0" },
{ "type": "text-delta", "id": "text-0", "delta": "Hello, I can help..." },
{ "type": "text-end", "id": "text-0" },
{
"type": "tool-input-available",
"toolCallId": "tool_1",
"toolName": "search",
"input": { "query": "example" }
}
]
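The sketch below shows the shape of that mapping for the complete message above (illustrative only; the real adapter processes incremental SSE deltas rather than one finished message):
// Illustrative mapping from LangGraph content blocks to useChat-style chunks.
type LangGraphBlock =
  | { type: 'text'; text: string }
  | { type: 'tool_use'; id: string; name: string; input: unknown };

function toChunks(blocks: LangGraphBlock[]) {
  return blocks.flatMap((block, i) =>
    block.type === 'text'
      ? [
          { type: 'text-start', id: `text-${i}` },
          { type: 'text-delta', id: `text-${i}`, delta: block.text },
          { type: 'text-end', id: `text-${i}` },
        ]
      : [
          {
            type: 'tool-input-available',
            toolCallId: block.id,
            toolName: block.name,
            input: block.input,
          },
        ]
  );
}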
// app/api/langgraph/stream/route.ts
import { NextRequest } from 'next/server';
export async function POST(request: NextRequest) {
const { messages } = await request.json();
const response = await fetch('https://your-langgraph-api.com/stream', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${process.env.LANGGRAPH_API_KEY}`,
},
body: JSON.stringify({
messages,
stream_mode: ['messages'],
thread_id: 'unique-thread-id',
}),
});
return new Response(response.body, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
},
});
}
import express from 'express';
import { Readable } from 'node:stream';

const app = express();
app.use(express.json());

app.post('/api/langgraph/stream', async (req, res) => {
  const { messages } = req.body;
  const response = await fetch('https://your-langgraph-api.com/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  });
  res.setHeader('Content-Type', 'text/event-stream');
  res.setHeader('Cache-Control', 'no-cache');
  // fetch returns a Web ReadableStream; convert it before piping to the Node response
  Readable.fromWeb(response.body as any).pipe(res);
});
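To eyeball the raw SSE output from either server, a quick script helps (a sketch; it assumes the server listens on localhost:3000 and that the request body shape matches your route):
// smoke-test.ts — dump the raw SSE stream to stdout (run with: npx tsx smoke-test.ts)
const response = await fetch('http://localhost:3000/api/langgraph/stream', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello' }] }),
});
for await (const chunk of response.body!) {
  process.stdout.write(chunk);
}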
const { messages, error, reload } = useChat({
transport,
onError: (error) => {
console.error('Chat error:', error);
// Report to your error tracking service
},
});
if (error) {
return (
<div>
<p>Error: {error.message}</p>
<button onClick={() => reload()}>Retry</button>
</div>
);
}
- Node.js 20+
- Vercel AI SDK 5.0 Beta+
- A LangGraph API endpoint that streams SSE responses
Check out the examples directory for:
- Basic Usage - Simple chat interface
- Tool Integration - Chat with tool calling
- Server Setup - Backend implementation examples
- TypeScript Usage - Advanced TypeScript patterns and utilities
MIT licensed, see LICENSE file.
- Fork the repository
- Create a feature branch
- Make your changes
- Add tests
- Submit a pull request
- Initial release
- Support for ai, human, system, tool message types
- Full streaming text and tool call support
- TypeScript support with exported types
- Compatible with Vercel AI SDK v5 Beta