11import Anthropic from "@anthropic-ai/sdk" ;
22import { events , kv , timers , messaging } from "@slflows/sdk/v1" ;
3- import { Schema , Validator } from "jsonschema" ;
43
54interface ToolDefinition {
65 blockId : string ;
@@ -66,6 +65,7 @@ export function streamMessage(params: {
6665 force : boolean | string ;
6766 thinking ?: boolean | undefined ;
6867 thinkingBudget ?: number | undefined ;
68+ schema ?: Anthropic . Messages . Tool . InputSchema | undefined ;
6969} ) {
7070 const {
7171 apiKey,
@@ -79,6 +79,7 @@ export function streamMessage(params: {
7979 force,
8080 thinking,
8181 thinkingBudget,
82+ schema,
8283 } = params ;
8384
8485 const client = new Anthropic ( {
@@ -136,7 +137,13 @@ export function streamMessage(params: {
136137 disable_parallel_tool_use : hasMCPServers ,
137138 }
138139 : undefined ,
139- betas : [ "mcp-client-2025-04-04" ] ,
140+ output_format : schema
141+ ? {
142+ type : "json_schema" ,
143+ schema,
144+ }
145+ : undefined ,
146+ betas : [ "mcp-client-2025-04-04" , "structured-outputs-2025-11-13" ] ,
140147 } ) ;
141148}
142149
@@ -269,135 +276,11 @@ export async function syncPendingEventWithStream(
269276 }
270277}
271278
/**
 * Generates a JSON object conforming to `params.schema` by making one extra
 * model call that forces a `"json"` tool invocation, then emits the combined
 * text + object result on success.
 *
 * Retries up to `params.maxRetries` times. Attempts can fail two ways:
 *  - the response is not a valid `tool_use` / fails schema validation
 *    (retried silently, no error recorded), or
 *  - the call throws (error recorded in `lastError`).
 * When all attempts are exhausted, the pending event is cancelled and, if an
 * exception was captured, it is rethrown to the caller.
 *
 * @param finalText - The assistant's final text answer, emitted alongside the object.
 * @param params - API credentials, model settings, conversation messages, the
 *   target JSON schema, retry budget, pending-event id, running token counts,
 *   and the parent event id used when emitting the result.
 * @returns Resolves when the result has been emitted or the pending event cancelled.
 * @throws The last captured error when every attempt raised an exception.
 */
export async function generateObject(
  finalText: string,
  params: {
    apiKey: string;
    model: string;
    maxTokens: number;
    messages: Anthropic.Beta.Messages.BetaMessageParam[];
    schema: Anthropic.Messages.Tool.InputSchema;
    maxRetries: number;
    pendingId: string;
    inputTokens: number;
    outputTokens: number;
    parentEventId: string;
  },
): Promise<void> {
  const {
    apiKey,
    model,
    maxTokens,
    messages,
    schema,
    maxRetries,
    pendingId,
    parentEventId,
  } = params;

  let retryCount = 0;
  // Mutable copies: token usage from each attempt is accumulated on top of
  // the counts the caller already incurred.
  let { inputTokens, outputTokens } = params;

  // Only set when an attempt throws; validation failures leave it undefined.
  let lastError: Error | undefined;

  // NOTE(review): with maxRetries === 0 the loop never runs and the pending
  // event is cancelled immediately — confirm callers always pass >= 1.
  while (retryCount < maxRetries) {
    try {
      await events.updatePending(pendingId, {
        statusDescription:
          retryCount === 0
            ? "Generating object..."
            : `Generating object... (retry ${retryCount + 1})`,
      });

      // Anthropic currently does not support structured output in the same request as the user prompt.
      // So we need to call the model one more time and force it to use the JSON tool.
      // The arguments that the model will respond with will be the object that we want to generate.

      // Remove thinking blocks from messages since we're disabling thinking for this call
      const messagesWithoutThinking = messages.map((msg) => ({
        ...msg,
        content: Array.isArray(msg.content)
          ? msg.content.filter((block: any) => block.type !== "thinking")
          : msg.content,
      }));

      const stream = streamMessage({
        maxTokens,
        model,
        messages: messagesWithoutThinking,
        tools: [
          {
            name: "json",
            description: "Respond with a JSON object.",
            input_schema: schema,
          },
        ],
        mcpServers: [],
        // Force the model to call the "json" tool so its arguments ARE the object.
        force: "json",
        apiKey,
      });

      const message = await stream.finalMessage();

      // Account usage even for attempts that end up failing validation below.
      inputTokens += message.usage.input_tokens;
      outputTokens += message.usage.output_tokens;

      if (message.stop_reason === "tool_use") {
        const toolCall = message.content.find(
          (content) => content.type === "tool_use",
        );

        if (toolCall) {
          // Validate the tool arguments against the requested schema before
          // accepting them as the generated object.
          const validator = new Validator();
          const result = validator.validate(toolCall.input, schema as Schema);

          if (result.errors.length === 0) {
            return emitResult(
              pendingId,
              {
                text: finalText,
                object: toolCall.input,
                usage: {
                  inputTokens,
                  outputTokens,
                },
              },
              parentEventId,
            );
          }
        }
      }

      // Invalid/missing tool call: count the attempt and try again.
      retryCount++;
    } catch (error) {
      lastError = error instanceof Error ? error : new Error(String(error));
      retryCount++;

      // If this was the last retry, we'll exit the loop and handle the error below
      if (retryCount >= maxRetries) {
        break;
      }
    }
  }

  // If we get here, all retries failed
  await events.cancelPending(
    pendingId,
    lastError
      ? `Object generation failed: ${lastError.message}`
      : "Failed to generate object",
  );

  if (lastError) {
    throw lastError;
  }
}
395-
396279export async function emitResult (
397280 pendingId : string ,
398281 result : {
399282 text : string | null ;
400- object : unknown ;
283+ output : unknown ;
401284 usage : {
402285 inputTokens : number ;
403286 outputTokens : number ;
@@ -408,8 +291,11 @@ export async function emitResult(
408291 await events . emit (
409292 {
410293 text : result . text ,
411- object : result . object ,
294+ output : result . output ,
412295 usage : result . usage ,
296+
297+ // TODO: Deprecated
298+ object : null ,
413299 } ,
414300 {
415301 complete : pendingId ,
@@ -658,7 +544,6 @@ export async function handleModelResponse(params: {
658544 mcpServers,
659545 systemPrompt,
660546 turn,
661- apiKey,
662547 maxRetries,
663548 schema,
664549 thinking,
@@ -676,32 +561,21 @@ export async function handleModelResponse(params: {
676561 throw new Error ( "Model did not respond with text" ) ;
677562 }
678563
564+ let output = null ;
565+
679566 if ( schema ) {
680- return generateObject ( textPart . text , {
681- apiKey,
682- model,
683- maxTokens,
684- messages : [
685- ...previousMessages ,
686- {
687- role : message . role ,
688- content : message . content ,
689- } ,
690- ] ,
691- schema,
692- maxRetries,
693- pendingId,
694- inputTokens : message . usage . input_tokens ,
695- outputTokens : message . usage . output_tokens ,
696- parentEventId : eventId ,
697- } ) ;
567+ try {
568+ output = JSON . parse ( textPart . text ) ;
569+ } catch {
570+ console . error ( "Failed to parse structured output" ) ;
571+ }
698572 }
699573
700574 return emitResult (
701575 pendingId ,
702576 {
703- text : textPart . text ,
704- object : null ,
577+ text : schema ? null : textPart . text ,
578+ output ,
705579 usage : {
706580 inputTokens : message . usage . input_tokens ,
707581 outputTokens : message . usage . output_tokens ,
@@ -833,6 +707,7 @@ export async function executeTurn(params: {
833707 thinking,
834708 thinkingBudget,
835709 temperature,
710+ schema,
836711 } ) ;
837712
838713 await syncPendingEventWithStream ( pendingId , stream ) ;
0 commit comments