diff --git a/.gitignore b/.gitignore
index 83468ded9..f385f2c13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,4 +38,4 @@ typings
clean-install.sh
lint-fix.sh
-draft/
\ No newline at end of file
+draft/
diff --git a/examples/vercel-ai-chat/.gitignore b/examples/vercel-ai-chat/.gitignore
new file mode 100644
index 000000000..63271800a
--- /dev/null
+++ b/examples/vercel-ai-chat/.gitignore
@@ -0,0 +1,39 @@
+# dependencies
+/node_modules
+/.pnp
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/versions
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# env files (can opt-in for committing if needed)
+.env*
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/examples/vercel-ai-chat/eslint.config.mjs b/examples/vercel-ai-chat/eslint.config.mjs
new file mode 100644
index 000000000..199c0ecf8
--- /dev/null
+++ b/examples/vercel-ai-chat/eslint.config.mjs
@@ -0,0 +1,16 @@
+import { defineConfig, globalIgnores } from "eslint/config";
+import nextVitals from "eslint-config-next/core-web-vitals";
+import nextTs from "eslint-config-next/typescript";
+
+const eslintConfig = defineConfig([
+ ...nextVitals,
+ ...nextTs,
+ globalIgnores([
+ ".next/**",
+ "out/**",
+ "build/**",
+ "next-env.d.ts",
+ ]),
+]);
+
+export default eslintConfig;
diff --git a/examples/vercel-ai-chat/next.config.ts b/examples/vercel-ai-chat/next.config.ts
new file mode 100644
index 000000000..aa26f3fff
--- /dev/null
+++ b/examples/vercel-ai-chat/next.config.ts
@@ -0,0 +1,12 @@
+import type { NextConfig } from "next";
+
+const nextConfig: NextConfig = {
+ turbopack: {},
+ transpilePackages: [
+ "@openuidev/react-ui",
+ "@openuidev/react-headless",
+ "@openuidev/lang-react",
+ ],
+};
+
+export default nextConfig;
diff --git a/examples/vercel-ai-chat/package.json b/examples/vercel-ai-chat/package.json
new file mode 100644
index 000000000..b3afb7b60
--- /dev/null
+++ b/examples/vercel-ai-chat/package.json
@@ -0,0 +1,32 @@
+{
+ "name": "vercel-ai-chat",
+ "version": "0.1.0",
+ "private": true,
+ "scripts": {
+ "dev": "next dev",
+ "build": "next build",
+ "start": "next start",
+ "lint": "eslint"
+ },
+ "dependencies": {
+ "@ai-sdk/openai": "^3.0.0",
+ "@openuidev/lang-react": "workspace:*",
+ "@openuidev/react-headless": "workspace:*",
+ "@openuidev/react-ui": "workspace:*",
+ "ai": "^6.0.0",
+ "next": "16.1.6",
+ "react": "19.2.3",
+ "react-dom": "19.2.3",
+ "zod": "^3.25.76"
+ },
+ "devDependencies": {
+ "@tailwindcss/postcss": "^4",
+ "@types/node": "^20",
+ "@types/react": "^19",
+ "@types/react-dom": "^19",
+ "eslint": "^9",
+ "eslint-config-next": "16.1.6",
+ "tailwindcss": "^4",
+ "typescript": "^5"
+ }
+}
diff --git a/examples/vercel-ai-chat/postcss.config.mjs b/examples/vercel-ai-chat/postcss.config.mjs
new file mode 100644
index 000000000..61e36849c
--- /dev/null
+++ b/examples/vercel-ai-chat/postcss.config.mjs
@@ -0,0 +1,7 @@
+const config = {
+ plugins: {
+ "@tailwindcss/postcss": {},
+ },
+};
+
+export default config;
diff --git a/examples/vercel-ai-chat/src/app/agent/page.tsx b/examples/vercel-ai-chat/src/app/agent/page.tsx
new file mode 100644
index 000000000..90618d553
--- /dev/null
+++ b/examples/vercel-ai-chat/src/app/agent/page.tsx
@@ -0,0 +1,62 @@
+"use client";
+import "@openuidev/react-ui/components.css";
+import "@openuidev/react-ui/styles/index.css";
+
+import { vercelAIAdapter, vercelAIMessageFormat } from "@openuidev/react-headless";
+import { FullScreen } from "@openuidev/react-ui";
+import { defaultExamples, defaultLibrary } from "@openuidev/react-ui/genui-lib";
+
+const systemPrompt = `You are a helpful AI agent with access to tools. Use them when appropriate.
+
+Available tools:
+- get_weather: Get current weather for any city
+- get_stock_price: Get stock prices by ticker symbol (e.g. AAPL, GOOGL)
+- calculate: Evaluate math expressions
+- search_web: Search the web for information
+
+Always use the appropriate tool when the user asks about weather, stocks, math, or needs web information. Present results clearly using markdown and GenUI components.
+
+Your response should be in the following vertical format:
+don't stack cards horizontally, always stack them vertically.
+don't use any other format.
+${defaultLibrary.prompt({ examples: defaultExamples })}`;
+
+export default function AgentPage() {
+ return (
+    <div>
+      <FullScreen
+        fetchResponse={async (messages, abortController) => {
+          return fetch("/api/agent", {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+              messages: vercelAIMessageFormat.toApi(messages),
+              systemPrompt,
+            }),
+            signal: abortController.signal,
+          });
+        }}
+        streamProtocol={vercelAIAdapter()}
+        componentLibrary={defaultLibrary}
+        agentName="Vercel AI Agent"
+        conversationStarters={{
+          variant: "short",
+          options: [
+            {
+              displayText: "Weather in Tokyo",
+              prompt: "What's the weather like in Tokyo right now?",
+            },
+            {
+              displayText: "AAPL stock price",
+              prompt: "What's the current stock price for AAPL?",
+            },
+            {
+              displayText: "Calculate something",
+              prompt: "What is (42 * 17) + sqrt(144)?",
+            },
+          ],
+        }}
+      />
+    </div>
+ );
+}
diff --git a/examples/vercel-ai-chat/src/app/api/agent/route.ts b/examples/vercel-ai-chat/src/app/api/agent/route.ts
new file mode 100644
index 000000000..56716178f
--- /dev/null
+++ b/examples/vercel-ai-chat/src/app/api/agent/route.ts
@@ -0,0 +1,159 @@
+import { streamText, tool, stepCountIs } from "ai";
+import type { ModelMessage } from "ai";
+import { openai } from "@ai-sdk/openai";
+import { z } from "zod";
+
+export const maxDuration = 30;
+
+const weatherTool = tool({
+ description: "Get current weather for a location.",
+ inputSchema: z.object({
+ location: z.string().describe("City name"),
+ }),
+ execute: async ({ location }) => {
+ await new Promise((r) => setTimeout(r, 800));
+    const knownTemps: Record<string, number> = {
+ tokyo: 22,
+ "san francisco": 18,
+ london: 14,
+ "new york": 25,
+ paris: 19,
+ sydney: 27,
+ mumbai: 33,
+ berlin: 16,
+ };
+ const conditions = [
+ "Sunny",
+ "Partly Cloudy",
+ "Cloudy",
+ "Light Rain",
+ "Clear Skies",
+ ];
+ const temp =
+ knownTemps[location.toLowerCase()] ?? Math.floor(Math.random() * 30 + 5);
+ const condition =
+ conditions[Math.floor(Math.random() * conditions.length)];
+ return {
+ location,
+ temperature_celsius: temp,
+ temperature_fahrenheit: Math.round(temp * 1.8 + 32),
+ condition,
+ humidity_percent: Math.floor(Math.random() * 40 + 40),
+ wind_speed_kmh: Math.floor(Math.random() * 25 + 5),
+ forecast: [
+ {
+ day: "Tomorrow",
+ high: temp + 2,
+ low: temp - 4,
+ condition: "Partly Cloudy",
+ },
+ {
+ day: "Day After",
+ high: temp + 1,
+ low: temp - 3,
+ condition: "Sunny",
+ },
+ ],
+ };
+ },
+});
+
+const stockPriceTool = tool({
+ description: "Get stock price for a ticker symbol.",
+ inputSchema: z.object({
+ symbol: z.string().describe("Ticker symbol, e.g. AAPL"),
+ }),
+ execute: async ({ symbol }) => {
+ await new Promise((r) => setTimeout(r, 600));
+ const s = symbol.toUpperCase();
+    const knownPrices: Record<string, number> = {
+ AAPL: 189.84,
+ GOOGL: 141.8,
+ TSLA: 248.42,
+ MSFT: 378.91,
+ AMZN: 178.25,
+ NVDA: 875.28,
+ META: 485.58,
+ };
+ const price = knownPrices[s] ?? Math.floor(Math.random() * 500 + 20);
+ const change = parseFloat((Math.random() * 8 - 4).toFixed(2));
+ return {
+ symbol: s,
+ price: parseFloat((price + change).toFixed(2)),
+ change,
+ change_percent: parseFloat(((change / price) * 100).toFixed(2)),
+ volume: `${(Math.random() * 50 + 10).toFixed(1)}M`,
+ day_high: parseFloat((price + Math.abs(change) + 1.5).toFixed(2)),
+ day_low: parseFloat((price - Math.abs(change) - 1.2).toFixed(2)),
+ };
+ },
+});
+
+const calculateTool = tool({
+ description: "Evaluate a math expression.",
+ inputSchema: z.object({
+ expression: z.string().describe("Math expression"),
+ }),
+ execute: async ({ expression }) => {
+ await new Promise((r) => setTimeout(r, 300));
+ try {
+ const sanitized = expression.replace(
+ /[^0-9+\-*/().%\s,Math.sqrtpowabsceilfloorround]/g,
+ "",
+ );
+ const result = new Function(`return (${sanitized})`)();
+ return { expression, result: Number(result) };
+ } catch {
+ return { expression, error: "Invalid expression" };
+ }
+ },
+});
+
+const searchWebTool = tool({
+ description: "Search the web for information.",
+ inputSchema: z.object({
+ query: z.string().describe("Search query"),
+ }),
+ execute: async ({ query }) => {
+ await new Promise((r) => setTimeout(r, 1000));
+ return {
+ query,
+ results: [
+ {
+ title: `Top result for "${query}"`,
+ snippet: `Comprehensive overview of ${query} with the latest information.`,
+ },
+ {
+ title: `${query} - Latest News`,
+ snippet: `Recent developments and updates related to ${query}.`,
+ },
+ {
+ title: `Understanding ${query}`,
+ snippet: `An in-depth guide explaining everything about ${query}.`,
+ },
+ ],
+ };
+ },
+});
+
+export async function POST(req: Request) {
+ const { messages, systemPrompt } = (await req.json()) as {
+ messages: ModelMessage[];
+ systemPrompt?: string;
+ };
+
+ const result = streamText({
+ model: openai("gpt-5.2"),
+ system: systemPrompt,
+ messages,
+ tools: {
+ get_weather: weatherTool,
+ get_stock_price: stockPriceTool,
+ calculate: calculateTool,
+ search_web: searchWebTool,
+ },
+ stopWhen: stepCountIs(5),
+ });
+
+ return result.toUIMessageStreamResponse();
+}
diff --git a/examples/vercel-ai-chat/src/app/api/chat/route.ts b/examples/vercel-ai-chat/src/app/api/chat/route.ts
new file mode 100644
index 000000000..a5751876f
--- /dev/null
+++ b/examples/vercel-ai-chat/src/app/api/chat/route.ts
@@ -0,0 +1,20 @@
+import { streamText } from "ai";
+import type { ModelMessage } from "ai";
+import { openai } from "@ai-sdk/openai";
+
+export const maxDuration = 30;
+
+export async function POST(req: Request) {
+ const { messages, systemPrompt } = (await req.json()) as {
+ messages: ModelMessage[];
+ systemPrompt?: string;
+ };
+
+ const result = streamText({
+ model: openai("gpt-5.2"),
+ system: systemPrompt,
+ messages,
+ });
+
+ return result.toUIMessageStreamResponse();
+}
diff --git a/examples/vercel-ai-chat/src/app/globals.css b/examples/vercel-ai-chat/src/app/globals.css
new file mode 100644
index 000000000..f1d8c73cd
--- /dev/null
+++ b/examples/vercel-ai-chat/src/app/globals.css
@@ -0,0 +1 @@
+@import "tailwindcss";
diff --git a/examples/vercel-ai-chat/src/app/layout.tsx b/examples/vercel-ai-chat/src/app/layout.tsx
new file mode 100644
index 000000000..32b1b8b88
--- /dev/null
+++ b/examples/vercel-ai-chat/src/app/layout.tsx
@@ -0,0 +1,19 @@
+import type { Metadata } from "next";
+import "./globals.css";
+
+export const metadata: Metadata = {
+ title: "Vercel AI Chat",
+ description: "Generative UI Chat with Vercel AI SDK",
+};
+
+export default function RootLayout({
+ children,
+}: Readonly<{
+ children: React.ReactNode;
+}>) {
+ return (
+    <html lang="en">
+      <body>{children}</body>
+    </html>
+ );
+}
diff --git a/examples/vercel-ai-chat/src/app/page.tsx b/examples/vercel-ai-chat/src/app/page.tsx
new file mode 100644
index 000000000..470055619
--- /dev/null
+++ b/examples/vercel-ai-chat/src/app/page.tsx
@@ -0,0 +1,32 @@
+"use client";
+import "@openuidev/react-ui/components.css";
+import "@openuidev/react-ui/styles/index.css";
+
+import { vercelAIAdapter, vercelAIMessageFormat } from "@openuidev/react-headless";
+import { FullScreen } from "@openuidev/react-ui";
+import { defaultLibrary, defaultPromptOptions } from "@openuidev/react-ui/genui-lib";
+
+const systemPrompt = defaultLibrary.prompt(defaultPromptOptions);
+
+export default function Home() {
+ return (
+    <div>
+      <FullScreen
+        fetchResponse={async (messages, abortController) => {
+          return fetch("/api/chat", {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+              messages: vercelAIMessageFormat.toApi(messages),
+              systemPrompt,
+            }),
+            signal: abortController.signal,
+          });
+        }}
+        streamProtocol={vercelAIAdapter()}
+        componentLibrary={defaultLibrary}
+        agentName="Vercel AI Chat"
+      />
+    </div>
+ );
+}
diff --git a/examples/vercel-ai-chat/tsconfig.json b/examples/vercel-ai-chat/tsconfig.json
new file mode 100644
index 000000000..cf9c65d3e
--- /dev/null
+++ b/examples/vercel-ai-chat/tsconfig.json
@@ -0,0 +1,34 @@
+{
+ "compilerOptions": {
+ "target": "ES2017",
+ "lib": ["dom", "dom.iterable", "esnext"],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "react-jsx",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": ["./src/*"]
+ }
+ },
+ "include": [
+ "next-env.d.ts",
+ "**/*.ts",
+ "**/*.tsx",
+ ".next/types/**/*.ts",
+ ".next/dev/types/**/*.ts",
+ "**/*.mts"
+ ],
+ "exclude": ["node_modules"]
+}
diff --git a/packages/react-headless/package.json b/packages/react-headless/package.json
index 4b705526a..5aae11bf1 100644
--- a/packages/react-headless/package.json
+++ b/packages/react-headless/package.json
@@ -27,6 +27,7 @@
},
"devDependencies": {
"@types/react": ">=17.0.0",
+ "ai": "^6.0.108",
"openai": "^6.22.0",
"vitest": "^4.0.18"
},
diff --git a/packages/react-headless/src/index.ts b/packages/react-headless/src/index.ts
index 7d815f924..3d5eadabe 100644
--- a/packages/react-headless/src/index.ts
+++ b/packages/react-headless/src/index.ts
@@ -9,6 +9,8 @@ export {
openAIMessageFormat,
openAIReadableStreamAdapter,
openAIResponsesAdapter,
+ vercelAIAdapter,
+ vercelAIMessageFormat,
} from "./stream/adapters";
export { processStreamedMessage } from "./stream/processStreamedMessage";
diff --git a/packages/react-headless/src/stream/adapters/__tests__/vercel-ai-message-format.test.ts b/packages/react-headless/src/stream/adapters/__tests__/vercel-ai-message-format.test.ts
new file mode 100644
index 000000000..e2f6e734e
--- /dev/null
+++ b/packages/react-headless/src/stream/adapters/__tests__/vercel-ai-message-format.test.ts
@@ -0,0 +1,687 @@
+import type { AssistantModelMessage, ModelMessage, ToolModelMessage, UserModelMessage } from "ai";
+import { describe, expect, it } from "vitest";
+import type {
+ AssistantMessage,
+ Message,
+ SystemMessage,
+ ToolMessage,
+ UserMessage,
+} from "../../../types";
+import { vercelAIMessageFormat } from "../vercel-ai-message-format";
+
+const { toApi, fromApi } = vercelAIMessageFormat;
+
+// ── toApi ────────────────────────────────────────────────────────
+
+describe("vercelAIMessageFormat", () => {
+ describe("toApi", () => {
+ it("converts a text user message", () => {
+ const messages: Message[] = [{ id: "u1", role: "user", content: "Hello" }];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toEqual([{ role: "user", content: "Hello" }]);
+ });
+
+ it("converts a text assistant message", () => {
+ const messages: Message[] = [{ id: "a1", role: "assistant", content: "Hi there" }];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toEqual([
+ { role: "assistant", content: [{ type: "text", text: "Hi there" }] },
+ ]);
+ });
+
+ it("converts a system message", () => {
+ const messages: Message[] = [{ id: "s1", role: "system", content: "You are helpful" }];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toEqual([{ role: "system", content: "You are helpful" }]);
+ });
+
+ it("converts a developer message to system role", () => {
+ const messages: Message[] = [{ id: "d1", role: "developer", content: "Be concise" }];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toEqual([{ role: "system", content: "Be concise" }]);
+ });
+
+ it("converts assistant message with tool calls", () => {
+ const messages: Message[] = [
+ {
+ id: "a1",
+ role: "assistant",
+ content: "Let me check",
+ toolCalls: [
+ {
+ id: "tc1",
+ type: "function",
+ function: {
+ name: "get_weather",
+ arguments: '{"city":"SF"}',
+ },
+ },
+ ],
+ } as AssistantMessage,
+ ];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toEqual([
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "Let me check" },
+ {
+ type: "tool-call",
+ toolCallId: "tc1",
+ toolName: "get_weather",
+ input: { city: "SF" },
+ },
+ ],
+ },
+ ]);
+ });
+
+ it("falls back to empty input when tool call arguments are malformed JSON", () => {
+ const messages: Message[] = [
+ {
+ id: "a1",
+ role: "assistant",
+ toolCalls: [
+ {
+ id: "tc1",
+ type: "function",
+ function: { name: "broken", arguments: "{not valid json" },
+ },
+ ],
+ } as AssistantMessage,
+ ];
+
+ const result = toApi(messages) as AssistantModelMessage[];
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.content).toEqual([
+ {
+ type: "tool-call",
+ toolCallId: "tc1",
+ toolName: "broken",
+ input: {},
+ },
+ ]);
+ });
+
+ it("converts assistant message with only tool calls (no text)", () => {
+ const messages: Message[] = [
+ {
+ id: "a1",
+ role: "assistant",
+ toolCalls: [
+ {
+ id: "tc1",
+ type: "function",
+ function: { name: "search", arguments: '{"q":"test"}' },
+ },
+ ],
+ } as AssistantMessage,
+ ];
+
+ const result = toApi(messages) as AssistantModelMessage[];
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.content).toEqual([
+ {
+ type: "tool-call",
+ toolCallId: "tc1",
+ toolName: "search",
+ input: { q: "test" },
+ },
+ ]);
+ });
+
+ it("groups consecutive tool messages into a single ToolModelMessage", () => {
+ const messages: Message[] = [
+ {
+ id: "t1",
+ role: "tool",
+ content: '{"temp":72}',
+ toolCallId: "tc1",
+ } as ToolMessage,
+ {
+ id: "t2",
+ role: "tool",
+ content: '{"result":4}',
+ toolCallId: "tc2",
+ } as ToolMessage,
+ ];
+
+ const result = toApi(messages) as ToolModelMessage[];
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.role).toBe("tool");
+ expect(result[0]!.content).toEqual([
+ {
+ type: "tool-result",
+ toolCallId: "tc1",
+ toolName: "",
+ output: { type: "json", value: { temp: 72 } },
+ },
+ {
+ type: "tool-result",
+ toolCallId: "tc2",
+ toolName: "",
+ output: { type: "json", value: { result: 4 } },
+ },
+ ]);
+ });
+
+ it("uses text output for non-JSON tool content", () => {
+ const messages: Message[] = [
+ {
+ id: "t1",
+ role: "tool",
+ content: "plain text result",
+ toolCallId: "tc1",
+ } as ToolMessage,
+ ];
+
+ const result = toApi(messages) as ToolModelMessage[];
+
+ expect(result[0]!.content[0]).toEqual(
+ expect.objectContaining({
+ type: "tool-result",
+ output: { type: "text", value: "plain text result" },
+ }),
+ );
+ });
+
+ it("handles non-consecutive tool messages as separate groups", () => {
+ const messages: Message[] = [
+ { id: "t1", role: "tool", content: '"a"', toolCallId: "tc1" } as ToolMessage,
+ { id: "a1", role: "assistant", content: "middle" } as AssistantMessage,
+ { id: "t2", role: "tool", content: '"b"', toolCallId: "tc2" } as ToolMessage,
+ ];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toHaveLength(3);
+ expect(result[0]!.role).toBe("tool");
+ expect(result[1]!.role).toBe("assistant");
+ expect(result[2]!.role).toBe("tool");
+ });
+
+ it("converts multipart user content with text and binary", () => {
+ const messages: Message[] = [
+ {
+ id: "u1",
+ role: "user",
+ content: [
+ { type: "text", text: "Describe this" },
+ {
+ type: "binary",
+ mimeType: "image/png",
+ url: "https://example.com/img.png",
+ },
+ ],
+ } as UserMessage,
+ ];
+
+ const result = toApi(messages) as UserModelMessage[];
+
+ expect(result[0]!.content).toEqual([
+ { type: "text", text: "Describe this" },
+ {
+ type: "file",
+ data: new URL("https://example.com/img.png"),
+ mediaType: "image/png",
+ },
+ ]);
+ });
+
+ it("builds data URI for binary content with base64 data", () => {
+ const messages: Message[] = [
+ {
+ id: "u1",
+ role: "user",
+ content: [
+ {
+ type: "binary",
+ mimeType: "image/jpeg",
+ data: "abc123",
+ },
+ ],
+ } as UserMessage,
+ ];
+
+ const result = toApi(messages) as UserModelMessage[];
+ const part = (result[0]!.content as Array<{ type: string }>)[0];
+
+ expect(part).toEqual({
+ type: "file",
+ data: new URL("data:image/jpeg;base64,abc123"),
+ mediaType: "image/jpeg",
+ });
+ });
+
+ it("converts assistant message with empty content to empty string", () => {
+ const messages: Message[] = [{ id: "a1", role: "assistant" } as AssistantMessage];
+
+ const result = toApi(messages) as AssistantModelMessage[];
+
+ expect(result[0]!.content).toBe("");
+ });
+
+ it("skips unknown roles like activity and reasoning", () => {
+ const messages: Message[] = [
+ { id: "act1", role: "activity", activityType: "search", content: {} } as unknown as Message,
+ { id: "r1", role: "reasoning", content: "thinking..." } as unknown as Message,
+ { id: "u1", role: "user", content: "Hello" } as UserMessage,
+ ];
+
+ const result = toApi(messages) as ModelMessage[];
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.role).toBe("user");
+ });
+ });
+
+ // ── fromApi ──────────────────────────────────────────────────────
+
+ describe("fromApi", () => {
+ it("converts a text user message", () => {
+ const data: ModelMessage[] = [{ role: "user", content: "Hello" }];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.role).toBe("user");
+ expect((result[0] as UserMessage).content).toBe("Hello");
+ expect(result[0]!.id).toBeTruthy();
+ });
+
+ it("converts a text assistant message (string content)", () => {
+ const data: ModelMessage[] = [{ role: "assistant", content: "Hi there" }];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.role).toBe("assistant");
+ expect((result[0] as AssistantMessage).content).toBe("Hi there");
+ });
+
+ it("converts a system message", () => {
+ const data: ModelMessage[] = [{ role: "system", content: "You are helpful" }];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect(result[0]!.role).toBe("system");
+ expect((result[0] as SystemMessage).content).toBe("You are helpful");
+ });
+
+ it("converts assistant message with tool call parts", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "Checking..." },
+ {
+ type: "tool-call",
+ toolCallId: "tc1",
+ toolName: "get_weather",
+ input: { city: "NYC" },
+ },
+ ],
+ },
+ ];
+
+ const result = fromApi(data);
+ const msg = result[0] as AssistantMessage;
+
+ expect(msg.role).toBe("assistant");
+ expect(msg.content).toBe("Checking...");
+ expect(msg.toolCalls).toEqual([
+ {
+ id: "tc1",
+ type: "function",
+ function: {
+ name: "get_weather",
+ arguments: '{"city":"NYC"}',
+ },
+ },
+ ]);
+ });
+
+ it("converts assistant with only tool calls (no text)", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "assistant",
+ content: [
+ {
+ type: "tool-call",
+ toolCallId: "tc1",
+ toolName: "search",
+ input: { q: "test" },
+ },
+ ],
+ },
+ ];
+
+ const result = fromApi(data);
+ const msg = result[0] as AssistantMessage;
+
+ expect(msg.content).toBeUndefined();
+ expect(msg.toolCalls).toHaveLength(1);
+ });
+
+ it("expands ToolModelMessage into multiple AG-UI ToolMessages", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "tc1",
+ toolName: "get_weather",
+ output: { type: "json", value: { temp: 72 } },
+ },
+ {
+ type: "tool-result",
+ toolCallId: "tc2",
+ toolName: "calculator",
+ output: { type: "text", value: "4" },
+ },
+ ],
+ },
+ ];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(2);
+
+ const t1 = result[0] as ToolMessage;
+ expect(t1.role).toBe("tool");
+ expect(t1.toolCallId).toBe("tc1");
+ expect(t1.content).toBe('{"temp":72}');
+
+ const t2 = result[1] as ToolMessage;
+ expect(t2.role).toBe("tool");
+ expect(t2.toolCallId).toBe("tc2");
+ expect(t2.content).toBe("4");
+ });
+
+ it("handles error-text ToolResultOutput", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "tc1",
+ toolName: "failing_tool",
+ output: { type: "error-text", value: "Something went wrong" },
+ },
+ ],
+ } as ToolModelMessage,
+ ];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect((result[0]! as ToolMessage).content).toBe("Something went wrong");
+ });
+
+ it("handles error-json ToolResultOutput", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "tc1",
+ toolName: "failing_tool",
+ output: { type: "error-json", value: { code: 500, msg: "fail" } },
+ },
+ ],
+ } as ToolModelMessage,
+ ];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect((result[0]! as ToolMessage).content).toBe('{"code":500,"msg":"fail"}');
+ });
+
+ it("handles execution-denied ToolResultOutput with reason", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "tc1",
+ toolName: "dangerous_tool",
+ output: { type: "execution-denied", reason: "User declined" },
+ },
+ ],
+ } as ToolModelMessage,
+ ];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect((result[0]! as ToolMessage).content).toBe("User declined");
+ });
+
+ it("handles execution-denied ToolResultOutput without reason", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "tc1",
+ toolName: "dangerous_tool",
+ output: { type: "execution-denied" },
+ },
+ ],
+ } as ToolModelMessage,
+ ];
+
+ const result = fromApi(data);
+
+ expect(result).toHaveLength(1);
+ expect((result[0]! as ToolMessage).content).toBe("Tool execution denied");
+ });
+
+ it("converts multipart user content with text and file parts", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "user",
+ content: [
+ { type: "text", text: "Describe this" },
+ {
+ type: "file",
+ data: new URL("https://example.com/img.png"),
+ mediaType: "image/png",
+ },
+ ],
+ },
+ ];
+
+ const result = fromApi(data);
+ const msg = result[0] as UserMessage;
+
+ expect(Array.isArray(msg.content)).toBe(true);
+ const parts = msg.content as Array<{ type: string }>;
+ expect(parts[0]).toEqual({ type: "text", text: "Describe this" });
+ expect(parts[1]).toEqual({
+ type: "binary",
+ mimeType: "image/png",
+ url: "https://example.com/img.png",
+ });
+ });
+
+ it("converts multipart user content with image parts", () => {
+ const data: ModelMessage[] = [
+ {
+ role: "user",
+ content: [
+ {
+ type: "image",
+ image: new URL("https://example.com/photo.jpg"),
+ mediaType: "image/jpeg",
+ },
+ ],
+ },
+ ];
+
+ const result = fromApi(data);
+ const msg = result[0] as UserMessage;
+ const parts = msg.content as Array<{ type: string; mimeType: string; url: string }>;
+
+ expect(parts[0]).toEqual({
+ type: "binary",
+ mimeType: "image/jpeg",
+ url: "https://example.com/photo.jpg",
+ });
+ });
+
+ it("generates unique IDs for each message", () => {
+ const data: ModelMessage[] = [
+ { role: "user", content: "a" },
+ { role: "user", content: "b" },
+ ];
+
+ const result = fromApi(data);
+
+ expect(result[0]!.id).toBeTruthy();
+ expect(result[1]!.id).toBeTruthy();
+ expect(result[0]!.id).not.toBe(result[1]!.id);
+ });
+ });
+
+ // ── Round-trip ───────────────────────────────────────────────────
+
+ describe("round-trip", () => {
+ it("preserves text messages through toApi → fromApi", () => {
+ const original: Message[] = [
+ { id: "u1", role: "user", content: "Hello" },
+ { id: "a1", role: "assistant", content: "Hi there" },
+ { id: "s1", role: "system", content: "Be helpful" },
+ ];
+
+ const roundTripped = fromApi(toApi(original));
+
+ expect(roundTripped).toHaveLength(3);
+ expect((roundTripped[0] as UserMessage).content).toBe("Hello");
+ expect((roundTripped[1] as AssistantMessage).content).toBe("Hi there");
+ expect((roundTripped[2] as SystemMessage).content).toBe("Be helpful");
+ });
+
+ it("preserves tool calls through toApi → fromApi", () => {
+ const original: Message[] = [
+ {
+ id: "a1",
+ role: "assistant",
+ content: "Checking",
+ toolCalls: [
+ {
+ id: "tc1",
+ type: "function",
+ function: { name: "search", arguments: '{"q":"weather"}' },
+ },
+ ],
+ } as AssistantMessage,
+ ];
+
+ const roundTripped = fromApi(toApi(original));
+ const msg = roundTripped[0] as AssistantMessage;
+
+ expect(msg.content).toBe("Checking");
+ expect(msg.toolCalls).toEqual([
+ {
+ id: "tc1",
+ type: "function",
+ function: { name: "search", arguments: '{"q":"weather"}' },
+ },
+ ]);
+ });
+
+ it("preserves tool results through toApi → fromApi", () => {
+ const original: Message[] = [
+ {
+ id: "t1",
+ role: "tool",
+ content: '{"temp":72}',
+ toolCallId: "tc1",
+ } as ToolMessage,
+ {
+ id: "t2",
+ role: "tool",
+ content: '{"result":4}',
+ toolCallId: "tc2",
+ } as ToolMessage,
+ ];
+
+ const roundTripped = fromApi(toApi(original));
+
+ expect(roundTripped).toHaveLength(2);
+ expect((roundTripped[0] as ToolMessage).toolCallId).toBe("tc1");
+ expect((roundTripped[0] as ToolMessage).content).toBe('{"temp":72}');
+ expect((roundTripped[1] as ToolMessage).toolCallId).toBe("tc2");
+ expect((roundTripped[1] as ToolMessage).content).toBe('{"result":4}');
+ });
+
+ it("preserves a full conversation through toApi → fromApi", () => {
+ const original: Message[] = [
+ { id: "s1", role: "system", content: "You are helpful" } as SystemMessage,
+ { id: "u1", role: "user", content: "What's the weather?" } as UserMessage,
+ {
+ id: "a1",
+ role: "assistant",
+ content: "Let me check",
+ toolCalls: [
+ {
+ id: "tc1",
+ type: "function",
+ function: { name: "get_weather", arguments: '{"city":"SF"}' },
+ },
+ ],
+ } as AssistantMessage,
+ {
+ id: "t1",
+ role: "tool",
+ content: '{"temp":65}',
+ toolCallId: "tc1",
+ } as ToolMessage,
+ {
+ id: "a2",
+ role: "assistant",
+ content: "It's 65°F in SF",
+ } as AssistantMessage,
+ ];
+
+ const roundTripped = fromApi(toApi(original));
+
+ expect(roundTripped).toHaveLength(5);
+ expect(roundTripped.map((m) => m.role)).toEqual([
+ "system",
+ "user",
+ "assistant",
+ "tool",
+ "assistant",
+ ]);
+
+ const assistant1 = roundTripped[2] as AssistantMessage;
+ expect(assistant1.toolCalls?.[0]!.function.name).toBe("get_weather");
+
+ const tool1 = roundTripped[3] as ToolMessage;
+ expect(tool1.toolCallId).toBe("tc1");
+ expect(tool1.content).toBe('{"temp":65}');
+ });
+ });
+});
diff --git a/packages/react-headless/src/stream/adapters/__tests__/vercel-ai.test.ts b/packages/react-headless/src/stream/adapters/__tests__/vercel-ai.test.ts
new file mode 100644
index 000000000..50d9e388d
--- /dev/null
+++ b/packages/react-headless/src/stream/adapters/__tests__/vercel-ai.test.ts
@@ -0,0 +1,754 @@
+import { describe, expect, it, vi } from "vitest";
+import { AGUIEvent, EventType } from "../../../types";
+import { vercelAIAdapter } from "../vercel-ai";
+
+function sseLines(...events: (Record<string, unknown> | string)[]): string {
+ return (
+ events
+ .map((e) => (typeof e === "string" ? `data: ${e}` : `data: ${JSON.stringify(e)}`))
+ .join("\n\n") + "\n\n"
+ );
+}
+
+function makeResponse(body: string): Response {
+ const encoder = new TextEncoder();
+ const stream = new ReadableStream({
+ start(controller) {
+ controller.enqueue(encoder.encode(body));
+ controller.close();
+ },
+ });
+ return new Response(stream);
+}
+
+function makeChunkedResponse(...chunks: string[]): Response {
+ const encoder = new TextEncoder();
+ const stream = new ReadableStream({
+ start(controller) {
+ for (const chunk of chunks) {
+ controller.enqueue(encoder.encode(chunk));
+ }
+ controller.close();
+ },
+ });
+ return new Response(stream);
+}
+
+async function collectEvents(response: Response) {
+ const adapter = vercelAIAdapter();
+ const events: AGUIEvent[] = [];
+ for await (const event of adapter.parse(response)) {
+ events.push(event);
+ }
+ return events;
+}
+
+describe("vercelAIAdapter", () => {
+ describe("text message flow", () => {
+ it("maps start → text-delta → finish to AG-UI events", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "msg-1" },
+ { type: "text-delta", id: "text-1", delta: "Hello" },
+ { type: "text-delta", id: "text-1", delta: " world" },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "msg-1", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "msg-1", delta: "Hello" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "msg-1", delta: " world" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "msg-1" },
+ ]);
+ });
+
+ it("generates a UUID when messageId is missing from start event", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start" },
+ { type: "text-delta", id: "t1", delta: "Hi" },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(3);
+ expect((events[0] as { type: EventType }).type).toBe(EventType.TEXT_MESSAGE_START);
+ const msgId = (events[0] as { messageId: string }).messageId;
+ expect(msgId).toBeTruthy();
+ expect(msgId).not.toBe("");
+ expect((events[1] as { messageId: string }).messageId).toBe(msgId);
+ expect((events[2] as { messageId: string }).messageId).toBe(msgId);
+ });
+ });
+
+ describe("tool call flow", () => {
+ it("maps tool-input-start → delta → available to AG-UI events", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "msg-2" },
+ { type: "tool-input-start", toolCallId: "call-1", toolName: "get_weather" },
+ { type: "tool-input-delta", toolCallId: "call-1", inputTextDelta: '{"city":' },
+ { type: "tool-input-delta", toolCallId: "call-1", inputTextDelta: '"SF"}' },
+ {
+ type: "tool-input-available",
+ toolCallId: "call-1",
+ toolName: "get_weather",
+ input: { city: "SF" },
+ },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "msg-2", role: "assistant" },
+ { type: EventType.TOOL_CALL_START, toolCallId: "call-1", toolCallName: "get_weather" },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-1", delta: '{"_request":' },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-1", delta: '{"city":' },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-1", delta: '"SF"}' },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-1", delta: "}" },
+ { type: EventType.TOOL_CALL_END, toolCallId: "call-1" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "msg-2" },
+ ]);
+ });
+ });
+
+ describe("error handling", () => {
+ it("maps error event to RUN_ERROR", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "msg-3" },
+ { type: "error", errorText: "Rate limit exceeded" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "msg-3", role: "assistant" },
+ { type: EventType.RUN_ERROR, message: "Rate limit exceeded" },
+ ]);
+ });
+
+ it("maps abort event to RUN_ERROR with reason", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "msg-4" },
+ { type: "abort", reason: "user cancelled" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events[1]).toEqual({
+ type: EventType.RUN_ERROR,
+ message: "user cancelled",
+ });
+ });
+
+ it("maps abort event without reason to default message", async () => {
+ const response = makeResponse(sseLines({ type: "abort" }));
+
+ const events = await collectEvents(response);
+
+ expect(events[0]).toEqual({
+ type: EventType.RUN_ERROR,
+ message: "Stream aborted",
+ });
+ });
+
+ it("maps tool-input-error to RUN_ERROR", async () => {
+ const response = makeResponse(
+ sseLines({
+ type: "tool-input-error",
+ toolCallId: "call-1",
+ toolName: "get_weather",
+ input: {},
+ errorText: "Invalid input schema",
+ }),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events[0]).toEqual({
+ type: EventType.RUN_ERROR,
+ message: "Invalid input schema",
+ });
+ });
+ });
+
+ describe("SSE parsing", () => {
+ it("ignores [DONE] sentinel", async () => {
+ const response = makeResponse(
+ sseLines({ type: "start", messageId: "msg-5" }, { type: "finish" }) + "data: [DONE]\n\n",
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(2);
+ expect((events[0] as { type: EventType }).type).toBe(EventType.TEXT_MESSAGE_START);
+ expect((events[1] as { type: EventType }).type).toBe(EventType.TEXT_MESSAGE_END);
+ });
+
+ it("ignores non-data lines", async () => {
+ const body =
+ "event: message\ndata: " + JSON.stringify({ type: "start", messageId: "m1" }) + "\n\n";
+ const response = makeResponse(body);
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(1);
+ expect((events[0] as { type: EventType }).type).toBe(EventType.TEXT_MESSAGE_START);
+ });
+
+ it("handles chunks split across reads", async () => {
+ const fullLine = `data: ${JSON.stringify({ type: "start", messageId: "msg-6" })}\n\n`;
+ const splitAt = Math.floor(fullLine.length / 2);
+
+ const response = makeChunkedResponse(fullLine.slice(0, splitAt), fullLine.slice(splitAt));
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(1);
+ expect(events[0]).toEqual({
+ type: EventType.TEXT_MESSAGE_START,
+ messageId: "msg-6",
+ role: "assistant",
+ });
+ });
+
+ it("handles JSON split mid-line across chunks", async () => {
+ const line1 = `data: ${JSON.stringify({ type: "text-delta", id: "t1", delta: "hello" })}`;
+ const response = makeChunkedResponse(
+ `data: ${JSON.stringify({ type: "start", messageId: "m1" })}\n\n`,
+ line1.slice(0, 10),
+ line1.slice(10) + "\n\n",
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`,
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(3);
+ expect((events[0] as { type: EventType }).type).toBe(EventType.TEXT_MESSAGE_START);
+ expect(events[1]).toEqual({
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "m1",
+ delta: "hello",
+ });
+ expect((events[2] as { type: EventType }).type).toBe(EventType.TEXT_MESSAGE_END);
+ });
+
+ it("recovers from malformed JSON without crashing", async () => {
+ const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
+
+ const response = makeResponse(
+ "data: not-json\n\n" + `data: ${JSON.stringify({ type: "start", messageId: "m1" })}\n\n`,
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(1);
+ expect(events[0]?.type).toBe(EventType.TEXT_MESSAGE_START);
+ expect(consoleSpy).toHaveBeenCalledWith(
+ "Failed to parse Vercel AI SSE event",
+ expect.any(Error),
+ );
+
+ consoleSpy.mockRestore();
+ });
+
+ it("throws when response has no body", async () => {
+ const response = new Response(null);
+ const adapter = vercelAIAdapter();
+
+ await expect(async () => {
+ for await (const _ of adapter.parse(response)) {
+ /* drain */
+ }
+ }).rejects.toThrow("No response body");
+ });
+
+ it("processes remaining buffer after stream ends without trailing newline", async () => {
+ const body = `data: ${JSON.stringify({ type: "start", messageId: "msg-tail" })}`;
+ const response = makeResponse(body);
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(1);
+ expect(events[0]).toEqual({
+ type: EventType.TEXT_MESSAGE_START,
+ messageId: "msg-tail",
+ role: "assistant",
+ });
+ });
+ });
+
+ describe("unhandled events", () => {
+ it("silently skips lifecycle events like text-start, text-end, start-step, finish-step", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "m1" },
+ { type: "text-start", id: "t1" },
+ { type: "text-delta", id: "t1", delta: "Hi" },
+ { type: "text-end", id: "t1" },
+ { type: "start-step" },
+ { type: "finish-step" },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "m1", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "m1", delta: "Hi" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "m1" },
+ ]);
+ });
+ });
+
+ describe("multi-byte UTF-8 split across chunks", () => {
+ it("reassembles emoji split at byte boundary", async () => {
+ const emoji = "🔥";
+ const json = JSON.stringify({ type: "text-delta", id: "t1", delta: emoji });
+ const sseLine = `data: ${json}\n\n`;
+ const bytes = new TextEncoder().encode(sseLine);
+
+ const startPayload = new TextEncoder().encode(
+ `data: ${JSON.stringify({ type: "start", messageId: "utf8-1" })}\n\n`,
+ );
+ const finishPayload = new TextEncoder().encode(
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`,
+ );
+
+ // Split the emoji SSE line mid-emoji (🔥 is 4 bytes in UTF-8)
+ const emojiStart = sseLine.indexOf(emoji);
+ const byteOffset = new TextEncoder().encode(sseLine.slice(0, emojiStart)).length + 2;
+
+ const stream = new ReadableStream({
+ start(controller) {
+ controller.enqueue(startPayload);
+ controller.enqueue(bytes.slice(0, byteOffset));
+ controller.enqueue(bytes.slice(byteOffset));
+ controller.enqueue(finishPayload);
+ controller.close();
+ },
+ });
+
+ const events = await collectEvents(new Response(stream));
+
+ expect(events).toHaveLength(3);
+ expect(events[1]).toEqual({
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "utf8-1",
+ delta: emoji,
+ });
+ });
+
+ it("reassembles CJK character split at byte boundary", async () => {
+ const cjk = "你好世界";
+ const json = JSON.stringify({ type: "text-delta", id: "t1", delta: cjk });
+ const sseLine = `data: ${json}\n\n`;
+ const bytes = new TextEncoder().encode(sseLine);
+
+ const startPayload = new TextEncoder().encode(
+ `data: ${JSON.stringify({ type: "start", messageId: "cjk-1" })}\n\n`,
+ );
+ const finishPayload = new TextEncoder().encode(
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`,
+ );
+
+ // Split mid-way through a multi-byte char (each CJK char is 3 bytes)
+ const cjkStart = sseLine.indexOf(cjk);
+ const byteOffset = new TextEncoder().encode(sseLine.slice(0, cjkStart)).length + 4;
+
+ const stream = new ReadableStream({
+ start(controller) {
+ controller.enqueue(startPayload);
+ controller.enqueue(bytes.slice(0, byteOffset));
+ controller.enqueue(bytes.slice(byteOffset));
+ controller.enqueue(finishPayload);
+ controller.close();
+ },
+ });
+
+ const events = await collectEvents(new Response(stream));
+
+ expect(events).toHaveLength(3);
+ expect(events[1]).toEqual({
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "cjk-1",
+ delta: cjk,
+ });
+ });
+
+ it("handles emoji in the final buffer after stream ends", async () => {
+ const json = JSON.stringify({ type: "text-delta", id: "t1", delta: "done✅" });
+ const sseLine = `data: ${json}`;
+ const bytes = new TextEncoder().encode(sseLine);
+
+ // Split so the ✅ (3 bytes) is cut mid-character
+ const checkStart = sseLine.indexOf("✅");
+ const byteOffset = new TextEncoder().encode(sseLine.slice(0, checkStart)).length + 1;
+
+ const startPayload = new TextEncoder().encode(
+ `data: ${JSON.stringify({ type: "start", messageId: "utf8-tail" })}\n\n`,
+ );
+
+ const stream = new ReadableStream({
+ start(controller) {
+ controller.enqueue(startPayload);
+ controller.enqueue(bytes.slice(0, byteOffset));
+ controller.enqueue(bytes.slice(byteOffset));
+ controller.close();
+ },
+ });
+
+ const events = await collectEvents(new Response(stream));
+
+ expect(events).toHaveLength(2);
+ expect(events[1]).toEqual({
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "utf8-tail",
+ delta: "done✅",
+ });
+ });
+ });
+
+ describe("interleaved tool calls and text", () => {
+ it("maps multiple tool calls interleaved with text in a single stream", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "msg-interleave" },
+ { type: "text-delta", id: "t1", delta: "Let me look up " },
+ { type: "text-delta", id: "t1", delta: "two things." },
+ { type: "tool-input-start", toolCallId: "call-a", toolName: "search" },
+ { type: "tool-input-delta", toolCallId: "call-a", inputTextDelta: '{"q":"weather"}' },
+ {
+ type: "tool-input-available",
+ toolCallId: "call-a",
+ toolName: "search",
+ input: { q: "weather" },
+ },
+ { type: "tool-input-start", toolCallId: "call-b", toolName: "calculator" },
+ { type: "tool-input-delta", toolCallId: "call-b", inputTextDelta: '{"expr":' },
+ { type: "tool-input-delta", toolCallId: "call-b", inputTextDelta: '"2+2"}' },
+ {
+ type: "tool-input-available",
+ toolCallId: "call-b",
+ toolName: "calculator",
+ input: { expr: "2+2" },
+ },
+ { type: "text-delta", id: "t2", delta: "Here are the results." },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "msg-interleave", role: "assistant" },
+ {
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "msg-interleave",
+ delta: "Let me look up ",
+ },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "msg-interleave", delta: "two things." },
+ { type: EventType.TOOL_CALL_START, toolCallId: "call-a", toolCallName: "search" },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-a", delta: '{"_request":' },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-a", delta: '{"q":"weather"}' },
+ { type: EventType.TOOL_CALL_START, toolCallId: "call-b", toolCallName: "calculator" },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-b", delta: '{"_request":' },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-b", delta: '{"expr":' },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-b", delta: '"2+2"}' },
+ {
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "msg-interleave",
+ delta: "Here are the results.",
+ },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-a", delta: "}" },
+ { type: EventType.TOOL_CALL_END, toolCallId: "call-a" },
+ { type: EventType.TOOL_CALL_ARGS, toolCallId: "call-b", delta: "}" },
+ { type: EventType.TOOL_CALL_END, toolCallId: "call-b" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "msg-interleave" },
+ ]);
+ });
+ });
+
+ describe("empty stream", () => {
+ it("emits only start and end for a stream with no content events", async () => {
+ const response = makeResponse(
+ sseLines({ type: "start", messageId: "empty-1" }, { type: "finish" }),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "empty-1", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "empty-1" },
+ ]);
+ });
+
+ it("produces no events from a completely empty body", async () => {
+ const response = makeResponse("");
+ const events = await collectEvents(response);
+ expect(events).toEqual([]);
+ });
+
+ it("produces no events from a body with only whitespace and newlines", async () => {
+ const response = makeResponse("\n\n \n\n");
+ const events = await collectEvents(response);
+ expect(events).toEqual([]);
+ });
+ });
+
+ describe("multiple consecutive errors", () => {
+ it("emits all RUN_ERROR events for consecutive errors", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "err-multi" },
+ { type: "error", errorText: "Rate limit" },
+ { type: "error", errorText: "Timeout" },
+ { type: "error", errorText: "Internal error" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "err-multi", role: "assistant" },
+ { type: EventType.RUN_ERROR, message: "Rate limit" },
+ { type: EventType.RUN_ERROR, message: "Timeout" },
+ { type: EventType.RUN_ERROR, message: "Internal error" },
+ ]);
+ });
+
+ it("emits errors from mixed error and abort events", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "err-mix" },
+ { type: "error", errorText: "Failed" },
+ { type: "abort", reason: "cancelled" },
+ {
+ type: "tool-input-error",
+ toolCallId: "c1",
+ toolName: "t",
+ input: {},
+ errorText: "bad input",
+ },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "err-mix", role: "assistant" },
+ { type: EventType.RUN_ERROR, message: "Failed" },
+ { type: EventType.RUN_ERROR, message: "cancelled" },
+ { type: EventType.RUN_ERROR, message: "bad input" },
+ ]);
+ });
+ });
+
+ describe("large payloads", () => {
+ it("handles a very large text delta in a single SSE line", async () => {
+ const largeText = "x".repeat(100_000);
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "big-1" },
+ { type: "text-delta", id: "t1", delta: largeText },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(3);
+ expect(events[1]).toEqual({
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId: "big-1",
+ delta: largeText,
+ });
+ });
+
+ it("handles large tool call args payload", async () => {
+ const largeArgs = JSON.stringify({ data: "y".repeat(50_000) });
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "big-tool" },
+ { type: "tool-input-start", toolCallId: "c1", toolName: "big_fn" },
+ { type: "tool-input-delta", toolCallId: "c1", inputTextDelta: largeArgs },
+ {
+ type: "tool-input-available",
+ toolCallId: "c1",
+ toolName: "big_fn",
+ input: { data: "y".repeat(50_000) },
+ },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toHaveLength(7);
+ expect(events[2]).toEqual({
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: "c1",
+ delta: '{"_request":',
+ });
+ expect(events[3]).toEqual({
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: "c1",
+ delta: largeArgs,
+ });
+ });
+ });
+
+ describe("whitespace and empty lines between data lines", () => {
+ it("handles extra blank lines between SSE data lines", async () => {
+ const body =
+ `data: ${JSON.stringify({ type: "start", messageId: "ws-1" })}\n\n` +
+ "\n\n\n" +
+ `data: ${JSON.stringify({ type: "text-delta", id: "t1", delta: "ok" })}\n\n` +
+ "\n" +
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`;
+
+ const response = makeResponse(body);
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "ws-1", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "ws-1", delta: "ok" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "ws-1" },
+ ]);
+ });
+
+ it("handles lines with only whitespace interspersed", async () => {
+ const body =
+ `data: ${JSON.stringify({ type: "start", messageId: "ws-2" })}\n\n` +
+ " \n" +
+ " \n\n" +
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`;
+
+ const response = makeResponse(body);
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "ws-2", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "ws-2" },
+ ]);
+ });
+ });
+
+ describe("additional edge cases", () => {
+ it("handles text delta with empty string", async () => {
+ const response = makeResponse(
+ sseLines(
+ { type: "start", messageId: "empty-delta" },
+ { type: "text-delta", id: "t1", delta: "" },
+ { type: "finish" },
+ ),
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "empty-delta", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "empty-delta", delta: "" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "empty-delta" },
+ ]);
+ });
+
+ it("handles multiple [DONE] sentinels gracefully", async () => {
+ const response = makeResponse(
+ sseLines({ type: "start", messageId: "done-multi" }, { type: "finish" }) +
+ "data: [DONE]\n\ndata: [DONE]\n\n",
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "done-multi", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "done-multi" },
+ ]);
+ });
+
+ it("handles data line with extra whitespace after JSON", async () => {
+ const response = makeResponse(
+ `data: ${JSON.stringify({ type: "start", messageId: "trim-1" })} \n\n` +
+ `data: ${JSON.stringify({ type: "finish" })} \n\n`,
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "trim-1", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "trim-1" },
+ ]);
+ });
+
+ it("multiple malformed lines don't prevent valid events from being emitted", async () => {
+ const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
+
+ const response = makeResponse(
+ "data: {broken\n\n" +
+ "data: also-broken\n\n" +
+ `data: ${JSON.stringify({ type: "start", messageId: "resilient" })}\n\n` +
+ "data: {\n\n" +
+ `data: ${JSON.stringify({ type: "text-delta", id: "t1", delta: "hi" })}\n\n` +
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`,
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "resilient", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "resilient", delta: "hi" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "resilient" },
+ ]);
+ expect(consoleSpy).toHaveBeenCalledTimes(3);
+
+ consoleSpy.mockRestore();
+ });
+
+ it("handles stream where every line arrives as a separate chunk", async () => {
+ const lines = [
+ `data: ${JSON.stringify({ type: "start", messageId: "byte-by-byte" })}\n\n`,
+ `data: ${JSON.stringify({ type: "text-delta", id: "t1", delta: "a" })}\n\n`,
+ `data: ${JSON.stringify({ type: "text-delta", id: "t1", delta: "b" })}\n\n`,
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`,
+ ];
+
+ const response = makeChunkedResponse(...lines);
+ const events = await collectEvents(response);
+
+ expect(events).toEqual([
+ { type: EventType.TEXT_MESSAGE_START, messageId: "byte-by-byte", role: "assistant" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "byte-by-byte", delta: "a" },
+ { type: EventType.TEXT_MESSAGE_CONTENT, messageId: "byte-by-byte", delta: "b" },
+ { type: EventType.TEXT_MESSAGE_END, messageId: "byte-by-byte" },
+ ]);
+ });
+
+ it("uses consistent messageId across events when start is in a later chunk", async () => {
+ const response = makeChunkedResponse(
+ `data: ${JSON.stringify({ type: "start", messageId: "late-start" })}\n`,
+ `\ndata: ${JSON.stringify({ type: "text-delta", id: "t1", delta: "yo" })}\n\n`,
+ `data: ${JSON.stringify({ type: "finish" })}\n\n`,
+ );
+
+ const events = await collectEvents(response);
+
+ expect(events.every((e) => ("messageId" in e ? e.messageId === "late-start" : true))).toBe(
+ true,
+ );
+ expect(events).toHaveLength(3);
+ });
+ });
+});
diff --git a/packages/react-headless/src/stream/adapters/index.ts b/packages/react-headless/src/stream/adapters/index.ts
index 0c5934197..6a8435dd3 100644
--- a/packages/react-headless/src/stream/adapters/index.ts
+++ b/packages/react-headless/src/stream/adapters/index.ts
@@ -4,3 +4,5 @@ export * from "./openai-conversation-message-format";
export * from "./openai-message-format";
export * from "./openai-readable-stream";
export * from "./openai-responses";
+export * from "./vercel-ai";
+export * from "./vercel-ai-message-format";
diff --git a/packages/react-headless/src/stream/adapters/vercel-ai-message-format.ts b/packages/react-headless/src/stream/adapters/vercel-ai-message-format.ts
new file mode 100644
index 000000000..4396bcb14
--- /dev/null
+++ b/packages/react-headless/src/stream/adapters/vercel-ai-message-format.ts
@@ -0,0 +1,304 @@
+import type {
+ AssistantModelMessage,
+ FilePart,
+ ModelMessage,
+ SystemModelMessage,
+ TextPart,
+ ToolCallPart,
+ ToolModelMessage,
+ ToolResultPart,
+ UserModelMessage,
+} from "ai";
+import type {
+ AssistantMessage,
+ BinaryInputContent,
+ InputContent,
+ Message,
+ ToolCall,
+ ToolMessage,
+ UserMessage,
+} from "../../types";
+import type { MessageFormat } from "../../types/messageFormat";
+
+// ── Outbound (AG-UI → Vercel AI ModelMessage[]) ─────────────────
+
+function toVercelUserMessage(message: UserMessage): UserModelMessage {
+ const { content } = message;
+
+ if (typeof content === "string") {
+ return { role: "user", content };
+ }
+
+ const parts: (TextPart | FilePart)[] =
+ content?.map((part: InputContent): TextPart | FilePart => {
+ if (part.type === "text") {
+ return { type: "text", text: part.text };
+ }
+ const url = part.url ?? `data:${part.mimeType};base64,${part.data ?? ""}`;
+ return {
+ type: "file",
+ data: new URL(url),
+ mediaType: part.mimeType,
+ };
+ }) ?? [];
+
+ return { role: "user", content: parts };
+}
+
+function toVercelAssistantMessage(message: AssistantMessage): AssistantModelMessage {
+ const parts: (TextPart | ToolCallPart)[] = [];
+
+ if (message.content) {
+ parts.push({ type: "text", text: message.content });
+ }
+
+ if (message.toolCalls?.length) {
+ for (const tc of message.toolCalls) {
+ let input: unknown;
+ try {
+ input = JSON.parse(tc.function.arguments);
+ } catch {
+ input = {};
+ }
+ parts.push({
+ type: "tool-call",
+ toolCallId: tc.id,
+ toolName: tc.function.name,
+ input,
+ });
+ }
+ }
+
+ return { role: "assistant", content: parts.length ? parts : "" };
+}
+
+/**
+ * Groups consecutive AG-UI ToolMessages into a single Vercel AI ToolModelMessage.
+ * Returns the number of messages consumed so the caller can skip ahead.
+ */
+function groupToolMessages(
+ messages: Message[],
+ startIndex: number,
+): { message: ToolModelMessage; consumed: number } {
+ const results: ToolResultPart[] = [];
+ let i = startIndex;
+
+ while (i < messages.length && messages[i]?.role === "tool") {
+ const tm = messages[i] as ToolMessage;
+ let output: ToolResultPart["output"];
+ try {
+ output = { type: "json", value: JSON.parse(tm.content) };
+ } catch {
+ output = { type: "text", value: tm.content };
+ }
+ results.push({
+ type: "tool-result",
+ toolCallId: tm.toolCallId,
+ toolName: "",
+ output,
+ });
+ i++;
+ }
+
+ return {
+ message: { role: "tool", content: results },
+ consumed: i - startIndex,
+ };
+}
+
+function toVercelMessages(messages: Message[]): ModelMessage[] {
+ const result: ModelMessage[] = [];
+ let i = 0;
+
+ while (i < messages.length) {
+ const msg = messages[i];
+
+ switch (msg?.role) {
+ case "user":
+ result.push(toVercelUserMessage(msg));
+ i++;
+ break;
+
+ case "assistant":
+ result.push(toVercelAssistantMessage(msg));
+ i++;
+ break;
+
+ case "tool": {
+ const { message, consumed } = groupToolMessages(messages, i);
+ result.push(message);
+ i += consumed;
+ break;
+ }
+
+ case "system":
+ result.push({ role: "system", content: msg.content } as SystemModelMessage);
+ i++;
+ break;
+
+ case "developer":
+ result.push({ role: "system", content: msg.content } as SystemModelMessage);
+ i++;
+ break;
+
+ default:
+ i++;
+ break;
+ }
+ }
+
+ return result;
+}
+
+// ── Inbound (Vercel AI ModelMessage[] → AG-UI) ──────────────────
+
+function fromVercelUserMessage(msg: UserModelMessage): UserMessage {
+ if (typeof msg.content === "string") {
+ return { id: crypto.randomUUID(), role: "user", content: msg.content };
+ }
+
+ const content: InputContent[] = msg.content.map((part): InputContent => {
+ if (part.type === "text") {
+ return { type: "text", text: part.text };
+ }
+ if (part.type === "file") {
+ const url = part.data instanceof URL ? part.data.toString() : undefined;
+ const binary: BinaryInputContent = {
+ type: "binary",
+ mimeType: part.mediaType,
+ url,
+ };
+ return binary;
+ }
+ // ImagePart → BinaryInputContent
+ if (part.type === "image") {
+ const url = part.image instanceof URL ? part.image.toString() : undefined;
+ return {
+ type: "binary",
+ mimeType: part.mediaType ?? "image/png",
+ url,
+ };
+ }
+ return { type: "text", text: "" };
+ });
+
+ return { id: crypto.randomUUID(), role: "user", content };
+}
+
+function fromVercelAssistantMessage(msg: AssistantModelMessage): AssistantMessage {
+ if (typeof msg.content === "string") {
+ return {
+ id: crypto.randomUUID(),
+ role: "assistant",
+ content: msg.content,
+ };
+ }
+
+ let text = "";
+ const toolCalls: ToolCall[] = [];
+
+ for (const part of msg.content) {
+ if (part.type === "text") {
+ text += part.text;
+ } else if (part.type === "tool-call") {
+ toolCalls.push({
+ id: part.toolCallId,
+ type: "function",
+ function: {
+ name: part.toolName,
+ arguments: JSON.stringify(part.input),
+ },
+ });
+ }
+ }
+
+ const result: AssistantMessage = {
+ id: crypto.randomUUID(),
+ role: "assistant",
+ content: text || undefined,
+ };
+
+ if (toolCalls.length) {
+ result.toolCalls = toolCalls;
+ }
+
+ return result;
+}
+
+function fromVercelToolMessage(msg: ToolModelMessage): ToolMessage[] {
+ return msg.content
+ .filter((part): part is ToolResultPart => part.type === "tool-result")
+ .map((part) => {
+ let content: string;
+ const { output } = part;
+ if (output.type === "text" || output.type === "error-text") {
+ content = output.value;
+ } else if (output.type === "json" || output.type === "error-json") {
+ content = JSON.stringify(output.value);
+ } else if (output.type === "execution-denied") {
+ content = output.reason ?? "Tool execution denied";
+ } else {
+ content = "";
+ }
+
+ return {
+ id: crypto.randomUUID(),
+ role: "tool" as const,
+ content,
+ toolCallId: part.toolCallId,
+ };
+ });
+}
+
+function fromVercelMessage(msg: ModelMessage): Message[] {
+ switch (msg.role) {
+ case "user":
+ return [fromVercelUserMessage(msg)];
+ case "assistant":
+ return [fromVercelAssistantMessage(msg)];
+ case "tool":
+ return fromVercelToolMessage(msg);
+ case "system":
+ return [
+ {
+ id: crypto.randomUUID(),
+ role: "system",
+ content: msg.content,
+ },
+ ];
+ default:
+ return [];
+ }
+}
+
+// ── MessageFormat implementation ─────────────────────────────────
+
+/**
+ * Converts between AG-UI message format and Vercel AI SDK
+ * `ModelMessage` format.
+ *
+ * Key differences from OpenAI format:
+ * - Tool calls are content parts (`ToolCallPart`) not a separate array
+ * - Tool results are batched in a single `ToolModelMessage` with
+ * multiple `ToolResultPart`s (AG-UI uses one `ToolMessage` per result)
+ * - Arguments are parsed objects (`input`) not JSON strings
+ *
+ * AG-UI → Vercel AI (toApi):
+ * - Consecutive `ToolMessage`s are grouped into one `ToolModelMessage`
+ * - `toolCalls[].function.arguments` (string) → `JSON.parse` → `input`
+ * - `DeveloperMessage` maps to `system` role (Vercel AI has no developer role)
+ *
+ * Vercel AI → AG-UI (fromApi):
+ * - Each `ToolResultPart` expands into a separate `ToolMessage`
+ * - `ToolCallPart.input` → `JSON.stringify` → `function.arguments`
+ * - Generates `id` via `crypto.randomUUID()`
+ */
+export const vercelAIMessageFormat: MessageFormat = {
+ toApi(messages: Message[]): ModelMessage[] {
+ return toVercelMessages(messages);
+ },
+
+ fromApi(data: unknown): Message[] {
+ return (data as ModelMessage[]).flatMap(fromVercelMessage);
+ },
+};
diff --git a/packages/react-headless/src/stream/adapters/vercel-ai.ts b/packages/react-headless/src/stream/adapters/vercel-ai.ts
new file mode 100644
index 000000000..280f9f041
--- /dev/null
+++ b/packages/react-headless/src/stream/adapters/vercel-ai.ts
@@ -0,0 +1,173 @@
+import type { UIMessageChunk } from "ai";
+import { AGUIEvent, EventType, StreamProtocolAdapter } from "../../types";
+
+interface ToolCallTracker {
+ inputComplete: boolean;
+}
+
+function* mapChunkToEvents(
+ event: UIMessageChunk,
+ messageId: string,
+  toolCalls: Map<string, ToolCallTracker>,
+): Generator<AGUIEvent> {
+ switch (event.type) {
+ case "start":
+ yield {
+ type: EventType.TEXT_MESSAGE_START,
+ messageId,
+ role: "assistant",
+ };
+ break;
+
+ case "text-delta":
+ yield {
+ type: EventType.TEXT_MESSAGE_CONTENT,
+ messageId,
+ delta: event.delta,
+ };
+ break;
+
+ case "finish":
+ for (const [id] of toolCalls) {
+ yield {
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: id,
+ delta: "}",
+ };
+ yield { type: EventType.TOOL_CALL_END, toolCallId: id };
+ }
+ toolCalls.clear();
+
+ yield {
+ type: EventType.TEXT_MESSAGE_END,
+ messageId,
+ };
+ break;
+
+ case "tool-input-start":
+ toolCalls.set(event.toolCallId, { inputComplete: false });
+ yield {
+ type: EventType.TOOL_CALL_START,
+ toolCallId: event.toolCallId,
+ toolCallName: event.toolName,
+ };
+ yield {
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: event.toolCallId,
+ delta: '{"_request":',
+ };
+ break;
+
+ case "tool-input-delta":
+ yield {
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: event.toolCallId,
+ delta: event.inputTextDelta,
+ };
+ break;
+
+ case "tool-input-available": {
+ const state = toolCalls.get(event.toolCallId);
+ if (state) state.inputComplete = true;
+ break;
+ }
+
+ case "tool-output-available":
+ yield {
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: event.toolCallId,
+ delta: `,"_response":${JSON.stringify(event.output)}}`,
+ };
+ yield { type: EventType.TOOL_CALL_END, toolCallId: event.toolCallId };
+ toolCalls.delete(event.toolCallId);
+ break;
+
+ case "tool-output-error":
+ yield {
+ type: EventType.TOOL_CALL_ARGS,
+ toolCallId: event.toolCallId,
+ delta: `,"_error":${JSON.stringify(event.errorText)}}`,
+ };
+ yield { type: EventType.TOOL_CALL_END, toolCallId: event.toolCallId };
+ toolCalls.delete(event.toolCallId);
+ break;
+
+ case "error":
+ yield {
+ type: EventType.RUN_ERROR,
+ message: event.errorText,
+ };
+ break;
+
+ case "abort":
+ yield {
+ type: EventType.RUN_ERROR,
+ message: event.reason ?? "Stream aborted",
+ };
+ break;
+
+ case "tool-input-error":
+ yield {
+ type: EventType.RUN_ERROR,
+ message: event.errorText,
+ };
+ break;
+
+ default:
+ break;
+ }
+}
+
+function* parseSseLines(
+ lines: string[],
+ messageId: { value: string },
+  toolCalls: Map<string, ToolCallTracker>,
+): Generator<AGUIEvent> {
+ for (const line of lines) {
+ if (!line.startsWith("data: ")) continue;
+ const data = line.slice(6).trim();
+ if (!data || data === "[DONE]") continue;
+
+ try {
+ const event = JSON.parse(data) as UIMessageChunk;
+
+ if (event.type === "start") {
+ messageId.value = event.messageId ?? crypto.randomUUID();
+ }
+
+ yield* mapChunkToEvents(event, messageId.value, toolCalls);
+ } catch (e) {
+ console.error("Failed to parse Vercel AI SSE event", e);
+ }
+ }
+}
+
+export const vercelAIAdapter = (): StreamProtocolAdapter => ({
+  async *parse(response: Response): AsyncIterable<AGUIEvent> {
+ const reader = response.body?.getReader();
+ if (!reader) throw new Error("No response body");
+
+ const decoder = new TextDecoder();
+ const messageId = { value: "" };
+    const toolCalls = new Map<string, ToolCallTracker>();
+ let buffer = "";
+
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ buffer += decoder.decode();
+ break;
+ }
+
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split("\n");
+ buffer = lines.pop() ?? "";
+
+ yield* parseSseLines(lines, messageId, toolCalls);
+ }
+
+ if (buffer.trim()) {
+ yield* parseSseLines([buffer], messageId, toolCalls);
+ }
+ },
+});
diff --git a/packages/react-headless/src/stream/processStreamedMessage.ts b/packages/react-headless/src/stream/processStreamedMessage.ts
index fb8002f69..ca57fcc05 100644
--- a/packages/react-headless/src/stream/processStreamedMessage.ts
+++ b/packages/react-headless/src/stream/processStreamedMessage.ts
@@ -92,7 +92,7 @@ export const processStreamedMessage = async ({
break;
case EventType.RUN_ERROR:
- console.error("Stream error:", (event as any).error);
+ console.error("Stream error:", (event as any).message);
break;
}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 1243b795f..7633e5f50 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -173,6 +173,61 @@ importers:
specifier: ^5
version: 5.9.3
+ examples/vercel-ai-chat:
+ dependencies:
+ '@ai-sdk/openai':
+ specifier: ^3.0.0
+ version: 3.0.39(zod@3.25.76)
+ '@openuidev/lang-react':
+ specifier: workspace:*
+ version: link:../../packages/lang-react
+ '@openuidev/react-headless':
+ specifier: workspace:*
+ version: link:../../packages/react-headless
+ '@openuidev/react-ui':
+ specifier: workspace:*
+ version: link:../../packages/react-ui
+ ai:
+ specifier: ^6.0.0
+ version: 6.0.111(zod@3.25.76)
+ next:
+ specifier: 16.1.6
+ version: 16.1.6(@babel/core@7.29.0)(@opentelemetry/api@1.9.0)(babel-plugin-react-compiler@1.0.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(sass@1.89.2)
+ react:
+ specifier: 19.2.3
+ version: 19.2.3
+ react-dom:
+ specifier: 19.2.3
+ version: 19.2.3(react@19.2.3)
+ zod:
+ specifier: ^3.25.76
+ version: 3.25.76
+ devDependencies:
+ '@tailwindcss/postcss':
+ specifier: ^4
+ version: 4.2.1
+ '@types/node':
+ specifier: ^20
+ version: 20.19.35
+ '@types/react':
+ specifier: ^19
+ version: 19.2.14
+ '@types/react-dom':
+ specifier: ^19
+ version: 19.2.3(@types/react@19.2.14)
+ eslint:
+ specifier: ^9
+ version: 9.29.0(jiti@2.6.1)
+ eslint-config-next:
+ specifier: 16.1.6
+ version: 16.1.6(@typescript-eslint/parser@8.56.1(eslint@9.29.0(jiti@2.6.1))(typescript@5.9.3))(eslint@9.29.0(jiti@2.6.1))(typescript@5.9.3)
+ tailwindcss:
+ specifier: ^4
+ version: 4.2.1
+ typescript:
+ specifier: ^5
+ version: 5.9.3
+
packages/create-openui-app:
dependencies:
'@inquirer/core':
@@ -232,6 +287,9 @@ importers:
'@types/react':
specifier: '>=17.0.0'
version: 19.2.14
+ ai:
+ specifier: ^6.0.108
+ version: 6.0.111(zod@3.25.67)
openai:
specifier: ^6.22.0
version: 6.22.0(ws@8.18.2)(zod@3.25.67)
@@ -467,6 +525,28 @@ packages:
'@ag-ui/core@0.0.45':
resolution: {integrity: sha512-Ccsarxb23TChONOWXDbNBqp1fIbOSMht8g7w6AsSYBTtdOwZ7h7AkjNkr3LSdVv+RbT30JMdSLtieJE0YepNPg==}
+ '@ai-sdk/gateway@3.0.63':
+ resolution: {integrity: sha512-0jwdkN3elC4Q9aT2ALxjXtGGVoye15zYgof6GfvuH1a9QKx9Rj4Wi2vy6SyyLvtSA/lB786dTZgC+cGwe6vzmA==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4.1.8
+
+ '@ai-sdk/openai@3.0.39':
+ resolution: {integrity: sha512-EZrs4L6kMkPQhpodagpEvqLSryOIK99WgblN0IsVHr1xhajWizQOZ0XMa7c5JpSYgIjV6u8GCpGV6hS3Mk2Bug==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4.1.8
+
+ '@ai-sdk/provider-utils@4.0.17':
+ resolution: {integrity: sha512-oyCeFINTYK0B8ZGUBiQc05G5vytPlKSmTTtm19xfJuUgoi8zkvvRcoPQci4mSnyfpPn2XSFFDfsALG8uGcapfg==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4.1.8
+
+ '@ai-sdk/provider@3.0.8':
+ resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==}
+ engines: {node: '>=18'}
+
'@alloc/quick-lru@5.2.0':
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
engines: {node: '>=10'}
@@ -3218,6 +3298,10 @@ packages:
cpu: [x64]
os: [win32]
+ '@vercel/oidc@3.1.0':
+ resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==}
+ engines: {node: '>= 20'}
+
'@vitest/expect@2.0.5':
resolution: {integrity: sha512-yHZtwuP7JZivj65Gxoi8upUN2OzHTi3zVfjwdpu2WrvCZPLwsJ2Ey5ILIPccoW23dd/zQBlJ4/dhi7DWNyXCpA==}
@@ -3331,6 +3415,12 @@ packages:
engines: {node: '>=0.4.0'}
hasBin: true
+ ai@6.0.111:
+ resolution: {integrity: sha512-K5aikNm4JGfJkzwIr3yA/qhOYIOIvOqjCxSQjQQ7bWWqm0uuPO2/qgdXL23gYJdTLPPYfvi2TTS+bg2Yp+r2Lw==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ zod: ^3.25.76 || ^4.1.8
+
ajv-formats@2.1.1:
resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==}
peerDependencies:
@@ -4202,6 +4292,10 @@ packages:
resolution: {integrity: sha512-6RxOBZ/cYgd8usLwsEl+EC09Au/9BcmCKYF2/xbml6DNczf7nv0MQb+7BA2F+li6//I+28VNlQR37XfQtcAJuA==}
engines: {node: '>=18.0.0'}
+ eventsource-parser@3.0.6:
+ resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
+ engines: {node: '>=18.0.0'}
+
expect-type@1.3.0:
resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==}
engines: {node: '>=12.0.0'}
@@ -4858,6 +4952,9 @@ packages:
json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
+ json-schema@0.4.0:
+ resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
+
json-stable-stringify-without-jsonify@1.0.1:
resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
@@ -6630,6 +6727,9 @@ packages:
zod@3.25.67:
resolution: {integrity: sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==}
+ zod@3.25.76:
+ resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==}
+
zod@4.3.6:
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
@@ -6658,8 +6758,46 @@ snapshots:
'@ag-ui/core@0.0.45':
dependencies:
rxjs: 7.8.1
+ zod: 3.25.76
+
+ '@ai-sdk/gateway@3.0.63(zod@3.25.67)':
+ dependencies:
+ '@ai-sdk/provider': 3.0.8
+ '@ai-sdk/provider-utils': 4.0.17(zod@3.25.67)
+ '@vercel/oidc': 3.1.0
zod: 3.25.67
+ '@ai-sdk/gateway@3.0.63(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 3.0.8
+ '@ai-sdk/provider-utils': 4.0.17(zod@3.25.76)
+ '@vercel/oidc': 3.1.0
+ zod: 3.25.76
+
+ '@ai-sdk/openai@3.0.39(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 3.0.8
+ '@ai-sdk/provider-utils': 4.0.17(zod@3.25.76)
+ zod: 3.25.76
+
+ '@ai-sdk/provider-utils@4.0.17(zod@3.25.67)':
+ dependencies:
+ '@ai-sdk/provider': 3.0.8
+ '@standard-schema/spec': 1.1.0
+ eventsource-parser: 3.0.6
+ zod: 3.25.67
+
+ '@ai-sdk/provider-utils@4.0.17(zod@3.25.76)':
+ dependencies:
+ '@ai-sdk/provider': 3.0.8
+ '@standard-schema/spec': 1.1.0
+ eventsource-parser: 3.0.6
+ zod: 3.25.76
+
+ '@ai-sdk/provider@3.0.8':
+ dependencies:
+ json-schema: 0.4.0
+
'@alloc/quick-lru@5.2.0': {}
'@ampproject/remapping@2.3.0':
@@ -9314,6 +9452,8 @@ snapshots:
'@unrs/resolver-binding-win32-x64-msvc@1.11.1':
optional: true
+ '@vercel/oidc@3.1.0': {}
+
'@vitest/expect@2.0.5':
dependencies:
'@vitest/spy': 2.0.5
@@ -9477,6 +9617,22 @@ snapshots:
acorn@8.16.0: {}
+ ai@6.0.111(zod@3.25.67):
+ dependencies:
+ '@ai-sdk/gateway': 3.0.63(zod@3.25.67)
+ '@ai-sdk/provider': 3.0.8
+ '@ai-sdk/provider-utils': 4.0.17(zod@3.25.67)
+ '@opentelemetry/api': 1.9.0
+ zod: 3.25.67
+
+ ai@6.0.111(zod@3.25.76):
+ dependencies:
+ '@ai-sdk/gateway': 3.0.63(zod@3.25.76)
+ '@ai-sdk/provider': 3.0.8
+ '@ai-sdk/provider-utils': 4.0.17(zod@3.25.76)
+ '@opentelemetry/api': 1.9.0
+ zod: 3.25.76
+
ajv-formats@2.1.1(ajv@8.17.1):
optionalDependencies:
ajv: 8.17.1
@@ -10685,6 +10841,8 @@ snapshots:
eventsource-parser@3.0.2: {}
+ eventsource-parser@3.0.6: {}
+
expect-type@1.3.0: {}
extend@3.0.2: {}
@@ -11410,6 +11568,8 @@ snapshots:
json-schema-traverse@1.0.0: {}
+ json-schema@0.4.0: {}
+
json-stable-stringify-without-jsonify@1.0.1: {}
json5@1.0.2:
@@ -13765,6 +13925,8 @@ snapshots:
zod@3.25.67: {}
+ zod@3.25.76: {}
+
zod@4.3.6: {}
zustand@4.5.7(@types/react@19.2.14)(react@19.2.4):