/** * AgentMessage: Union of LLM messages + custom messages. * This abstraction allows apps to add custom message types while maintaining * type safety and compatibility with the base LLM messages. */ exporttypeAgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];
exportinterfaceAgentToolResult<T> { // Content blocks supporting text and images content: (TextContent | ImageContent)[]; // Details to be displayed in a UI or logged details: T; } // Callback for streaming tool execution updates exporttypeAgentToolUpdateCallback<T = any> = (partialResult: AgentToolResult<T>) =>void; // AgentTool extends Tool but adds the execute function exportinterfaceAgentTool<TParametersextendsTSchema = TSchema, TDetails = any> extendsTool<TParameters> { // A human-readable label for the tool to be displayed in UI label: string; execute: ( toolCallId: string, params: Static<TParameters>, signal?: AbortSignal, onUpdate?: AgentToolUpdateCallback<TDetails>, ) =>Promise<AgentToolResult<TDetails>>; }
/**
 * Optional hook that supplies steering messages to inject mid-run.
 *
 * Invoked after every tool execution to detect user interruptions.
 * When it resolves to a non-empty list, the remaining tool calls are
 * skipped and the returned messages are appended to the context before
 * the next LLM call.
 *
 * Use this to "steer" the agent while it is still working.
 */
getSteeringMessages?: () => Promise<AgentMessage[]>;
/**
 * Optional hook that supplies follow-up messages once the agent would
 * otherwise stop.
 *
 * Invoked when no tool calls remain and no steering messages are
 * pending. When it resolves to a non-empty list, the messages are
 * appended to the context and the agent runs another turn.
 *
 * Use this for follow-up input that should wait until the agent finishes.
 */
getFollowUpMessages?: () => Promise<AgentMessage[]>;