- {m?.experimental_attachments
+ {m?.attachments
?.filter(attachment =>
attachment?.contentType?.startsWith('image/'),
)
@@ -237,7 +235,7 @@ export default function Chat() {
className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl space-y-2"
onSubmit={event => {
handleSubmit(event, {
- experimental_attachments: files,
+ attachments: files,
});
setFiles(undefined);
@@ -274,7 +272,7 @@ In this code, you:
1. Create state to hold the files and create a ref to the file input field.
2. Display the "uploaded" files in the UI.
-3. Update the `onSubmit` function, to call the `handleSubmit` function manually, passing the files as an option using the `experimental_attachments` key.
+3. Update the `onSubmit` function, to call the `handleSubmit` function manually, passing the files as an option using the `attachments` key.
4. Add a file input field to the form, including an `onChange` handler to handle updating the files state.
## Running Your Application
@@ -304,31 +302,29 @@ ANTHROPIC_API_KEY=xxxxxxxxx
3. Modify your route handler:
-```tsx filename="app/api/chat/route.ts" highlight="2,10-15,18-20"
+```tsx filename="app/api/chat/route.ts" highlight="2,10-15,18-21"
import { openai } from '@ai-sdk/openai';
import { anthropic } from '@ai-sdk/anthropic';
-import { streamText, type Message } from 'ai';
+import { streamText, convertToModelMessages, UIMessage } from 'ai';
export const maxDuration = 30;
export async function POST(req: Request) {
- const { messages }: { messages: Message[] } = await req.json();
+ const { messages }: { messages: UIMessage[] } = await req.json();
// check if user has sent a PDF
const messagesHavePDF = messages.some(message =>
- message.experimental_attachments?.some(
- a => a.contentType === 'application/pdf',
- ),
+ message.attachments?.some(a => a.contentType === 'application/pdf'),
);
const result = streamText({
model: messagesHavePDF
? anthropic('claude-3-5-sonnet-latest')
: openai('gpt-4o'),
- messages,
+ messages: convertToModelMessages(messages),
});
- return result.toDataStreamResponse();
+ return result.toUIMessageStreamResponse();
}
```
@@ -356,7 +352,7 @@ export default function Chat() {
{m.role === 'user' ? 'User: ' : 'AI: '}
{m.content}
- {m?.experimental_attachments
+ {m?.attachments
?.filter(
attachment =>
attachment?.contentType?.startsWith('image/') ||
@@ -389,7 +385,7 @@ export default function Chat() {
className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl space-y-2"
onSubmit={event => {
handleSubmit(event, {
- experimental_attachments: files,
+ attachments: files,
});
setFiles(undefined);
diff --git a/content/docs/02-guides/03-slackbot.mdx b/content/cookbook/00-guides/03-slackbot.mdx
similarity index 98%
rename from content/docs/02-guides/03-slackbot.mdx
rename to content/cookbook/00-guides/03-slackbot.mdx
index faa9efd8926e..9191b337506a 100644
--- a/content/docs/02-guides/03-slackbot.mdx
+++ b/content/cookbook/00-guides/03-slackbot.mdx
@@ -1,6 +1,7 @@
---
title: Slackbot Guide
description: Learn how to use the AI SDK to build an AI Slackbot.
+tags: ['agents', 'chatbot']
---
# Building a Slack AI Chatbot with the AI SDK
@@ -28,7 +29,7 @@ Before we start building, you'll need to create and configure a Slack app:
This project uses the following stack:
-- [AI SDK by Vercel](https://sdk.vercel.ai/docs)
+- [AI SDK by Vercel](/docs)
- [Slack Web API](https://api.slack.com/web)
- [Vercel](https://vercel.com)
- [OpenAI](https://openai.com)
@@ -318,10 +319,11 @@ Here's how to implement it:
```typescript filename="lib/generate-response.ts"
import { openai } from '@ai-sdk/openai';
-import { CoreMessage, generateText } from 'ai';
+import { generateText } from 'ai';
+import type { ModelMessage } from 'ai';
export const generateResponse = async (
- messages: CoreMessage[],
+ messages: ModelMessage[],
updateStatus?: (status: string) => void,
) => {
const { text } = await generateText({
@@ -349,12 +351,13 @@ The real power of the AI SDK comes from tools that enable your bot to perform ac
```typescript filename="lib/generate-response.ts"
import { openai } from '@ai-sdk/openai';
-import { CoreMessage, generateText, tool } from 'ai';
+import { generateText, tool } from 'ai';
+import type { ModelMessage } from 'ai';
import { z } from 'zod';
import { exa } from './utils';
export const generateResponse = async (
- messages: CoreMessage[],
+ messages: ModelMessage[],
updateStatus?: (status: string) => void,
) => {
const { text } = await generateText({
diff --git a/content/docs/02-guides/04-natural-language-postgres.mdx b/content/cookbook/00-guides/04-natural-language-postgres.mdx
similarity index 99%
rename from content/docs/02-guides/04-natural-language-postgres.mdx
rename to content/cookbook/00-guides/04-natural-language-postgres.mdx
index dd4a829901a9..c0f740e287ce 100644
--- a/content/docs/02-guides/04-natural-language-postgres.mdx
+++ b/content/cookbook/00-guides/04-natural-language-postgres.mdx
@@ -1,6 +1,7 @@
---
title: Natural Language Postgres
description: Learn how to build a Next.js app that lets you talk to a PostgreSQL database in natural language.
+tags: ['agents', 'next', 'tools']
---
# Natural Language Postgres Guide
@@ -20,7 +21,7 @@ You can find a completed version of this project at [natural-language-postgres.v
This project uses the following stack:
- [Next.js](https://nextjs.org) (App Router)
-- [AI SDK](https://sdk.vercel.ai/docs)
+- [AI SDK](/docs)
- [OpenAI](https://openai.com)
- [Zod](https://zod.dev)
- [Postgres](https://www.postgresql.org/) with [ Vercel Postgres ](https://vercel.com/postgres)
diff --git a/content/docs/02-guides/05-computer-use.mdx b/content/cookbook/00-guides/05-computer-use.mdx
similarity index 96%
rename from content/docs/02-guides/05-computer-use.mdx
rename to content/cookbook/00-guides/05-computer-use.mdx
index a3304b60fdb8..de8948ac856c 100644
--- a/content/docs/02-guides/05-computer-use.mdx
+++ b/content/cookbook/00-guides/05-computer-use.mdx
@@ -1,6 +1,7 @@
---
title: Get started with Computer Use
description: Get started with Claude's Computer Use capabilities with the AI SDK
+tags: ['computer-use', 'tools']
---
# Get started with Computer Use
@@ -83,7 +84,7 @@ First, ensure you have the AI SDK and [Anthropic AI SDK provider](/providers/ai-
-You can add Computer Use to your AI SDK applications using provider-defined tools. These tools accept various input parameters (like display height and width in the case of the computer tool) and then require that you define an execute function.
+You can add Computer Use to your AI SDK applications using provider-defined-client tools. These tools accept various input parameters (like display height and width in the case of the computer tool) and then require that you define an execute function.
Here's how you could set up the Computer Tool with the AI SDK:
@@ -110,7 +111,7 @@ const computerTool = anthropic.tools.computer_20241022({
experimental_toToolResultContent(result) {
return typeof result === 'string'
? [{ type: 'text', text: result }]
- : [{ type: 'image', data: result.data, mimeType: 'image/png' }];
+ : [{ type: 'image', data: result.data, mediaType: 'image/png' }];
},
});
```
@@ -138,7 +139,7 @@ const result = await generateText({
tools: { computer: computerTool },
});
-console.log(response.text);
+console.log(result.text);
```
For streaming responses, use `streamText` to receive updates in real-time:
diff --git a/content/cookbook/00-guides/18-claude-4.mdx b/content/cookbook/00-guides/18-claude-4.mdx
new file mode 100644
index 000000000000..5f2efc42cb93
--- /dev/null
+++ b/content/cookbook/00-guides/18-claude-4.mdx
@@ -0,0 +1,212 @@
+---
+title: Get started with Claude 4
+description: Get started with Claude 4 using the AI SDK.
+tags: ['getting-started']
+---
+
+# Get started with Claude 4
+
+With the release of Claude 4, there has never been a better time to start building AI applications, particularly those that require complex reasoning capabilities and advanced intelligence.
+
+The [AI SDK](/) is a powerful TypeScript toolkit for building AI applications with large language models (LLMs) like Claude 4 alongside popular frameworks like React, Next.js, Vue, Svelte, Node.js, and more.
+
+## Claude 4
+
+Claude 4 is Anthropic's most advanced model family to date, offering exceptional capabilities across reasoning, instruction following, coding, and knowledge tasks. Available in two variants—Sonnet and Opus—Claude 4 delivers state-of-the-art performance with enhanced reliability and control. Claude 4 builds on the extended thinking capabilities introduced in Claude 3.7, allowing for even more sophisticated problem-solving through careful, step-by-step reasoning.
+
+Claude 4 excels at complex reasoning, code generation and analysis, detailed content creation, and agentic capabilities, making it ideal for powering sophisticated AI workflows, customer-facing agents, and applications requiring nuanced understanding and responses. Claude Opus 4 is an excellent coding model, leading on SWE-bench (72.5%) and Terminal-bench (43.2%), with the ability to sustain performance on long-running tasks that require focused effort and thousands of steps. Claude Sonnet 4 significantly improves on Sonnet 3.7, excelling in coding with 72.7% on SWE-bench while balancing performance and efficiency.
+
+### Prompt Engineering for Claude 4 Models
+
+Claude 4 models respond well to clear, explicit instructions. The following best practices can help achieve optimal performance:
+
+1. **Provide explicit instructions**: Clearly state what you want the model to do, including specific steps or formats for the response.
+2. **Include context and motivation**: Explain why a task is being performed to help the model better understand the underlying goals.
+3. **Avoid negative examples**: When providing examples, only demonstrate the behavior you want to see, not what you want to avoid.
+
+## Getting Started with the AI SDK
+
+The AI SDK is the TypeScript toolkit designed to help developers build AI-powered applications with React, Next.js, Vue, Svelte, Node.js, and more. Integrating LLMs into applications is complicated and heavily dependent on the specific model provider you use.
+
+The AI SDK abstracts away the differences between model providers, eliminates boilerplate code for building chatbots, and allows you to go beyond text output to generate rich, interactive components.
+
+At the center of the AI SDK is [AI SDK Core](/docs/ai-sdk-core/overview), which provides a unified API to call any LLM. The code snippet below is all you need to call Claude 4 Sonnet with the AI SDK:
+
+```ts
+import { anthropic } from '@ai-sdk/anthropic';
+import { generateText } from 'ai';
+
+const { text } = await generateText({
+ model: anthropic('claude-4-sonnet-20250514'),
+ prompt: 'How will quantum computing impact cryptography by 2050?',
+});
+console.log(text);
+```
+
+### Reasoning Ability
+
+Claude 4 enhances the extended thinking capabilities first introduced in Claude 3.7 Sonnet—the ability to solve complex problems with careful, step-by-step reasoning. Additionally, both Opus 4 and Sonnet 4 can now use tools during extended thinking, allowing Claude to alternate between reasoning and tool use to improve responses. You can enable extended thinking using the `thinking` provider option and specifying a thinking budget in tokens. For interleaved thinking (where Claude can think in between tool calls) you'll need to enable a beta feature using the `anthropic-beta` header:
+
+```ts
+import { anthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic';
+import { generateText } from 'ai';
+
+const { text, reasoning, reasoningDetails } = await generateText({
+ model: anthropic('claude-4-sonnet-20250514'),
+ prompt: 'How will quantum computing impact cryptography by 2050?',
+ providerOptions: {
+ anthropic: {
+ thinking: { type: 'enabled', budgetTokens: 15000 },
+ } satisfies AnthropicProviderOptions,
+ },
+ headers: {
+ 'anthropic-beta': 'interleaved-thinking-2025-05-14',
+ },
+});
+
+console.log(text); // text response
+console.log(reasoning); // reasoning text
+console.log(reasoningDetails); // reasoning details including redacted reasoning
+```
+
+### Building Interactive Interfaces
+
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+
+AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
+
+With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+
+Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and Claude Sonnet 4:
+
+In a new Next.js application, first install the AI SDK and the Anthropic provider:
+
+
+
+Then, create a route handler for the chat endpoint:
+
+```tsx filename="app/api/chat/route.ts"
+import { anthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic';
+import { streamText } from 'ai';
+
+export async function POST(req: Request) {
+ const { messages } = await req.json();
+
+ const result = streamText({
+ model: anthropic('claude-4-sonnet-20250514'),
+ messages,
+ headers: {
+ 'anthropic-beta': 'interleaved-thinking-2025-05-14',
+ },
+ providerOptions: {
+ anthropic: {
+ thinking: { type: 'enabled', budgetTokens: 15000 },
+ } satisfies AnthropicProviderOptions,
+ },
+ });
+
+ return result.toDataStreamResponse({
+ sendReasoning: true,
+ });
+}
+```
+
+
+ You can forward the model's reasoning tokens to the client with
+ `sendReasoning: true` in the `toDataStreamResponse` method.
+
+
+Finally, update the root page (`app/page.tsx`) to use the `useChat` hook:
+
+```tsx filename="app/page.tsx"
+'use client';
+
+import { useChat } from '@ai-sdk/react';
+
+export default function Page() {
+ const { messages, input, handleInputChange, handleSubmit, error } = useChat();
+
+ return (
+
+
+ {messages.map(message => (
+
+
+ {message.role === 'user' ? 'You' : 'Claude 4'}
+
+ {message.parts.map((part, index) => {
+ if (part.type === 'text') {
+ return (
+
+ {part.text}
+
+ );
+ }
+ if (part.type === 'reasoning') {
+ return (
+
+
+
+ View reasoning
+
+ {part.details.map(detail =>
+ detail.type === 'text' ? detail.text : '',
+ )}
+
+
+ );
+ }
+ })}
+
+ ))}
+
+
+
+ );
+}
+```
+
+
+ You can access the model's reasoning tokens with the `reasoning` part on the
+ message `parts`.
+
+
+The useChat hook on your root page (`app/page.tsx`) will make a request to your AI provider endpoint (`app/api/chat/route.ts`) whenever the user submits a message. The messages are then displayed in the chat UI.
+
+### Claude 4 Model Variants
+
+Claude 4 is available in two variants, each optimized for different use cases:
+
+- **Claude Sonnet 4**: Balanced performance suitable for most enterprise applications, with significant improvements over Sonnet 3.7.
+- **Claude Opus 4**: Anthropic's most powerful model and the best coding model available. Excels at sustained performance on long-running tasks that require focused effort and thousands of steps, with the ability to work continuously for several hours.
+
+## Get Started
+
+Ready to dive in? Here's how you can begin:
+
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the capabilities of the AI SDK.
+2. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action.
+3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) at [ai-sdk.dev/docs/guides](/docs/guides).
+4. Use ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
diff --git a/content/docs/02-guides/19-openai-responses.mdx b/content/cookbook/00-guides/19-openai-responses.mdx
similarity index 93%
rename from content/docs/02-guides/19-openai-responses.mdx
rename to content/cookbook/00-guides/19-openai-responses.mdx
index f53fdfe71e2e..d118aaed0d36 100644
--- a/content/docs/02-guides/19-openai-responses.mdx
+++ b/content/cookbook/00-guides/19-openai-responses.mdx
@@ -1,6 +1,7 @@
---
title: OpenAI Responses API
description: Get started with the OpenAI Responses API using the AI SDK.
+tags: ['getting-started', 'agents']
---
# Get started with OpenAI Responses API
@@ -180,8 +181,13 @@ import { openai } from '@ai-sdk/openai';
// Completions API
const { text } = await generateText({
- model: openai.responses('gpt-4o', { parallelToolCalls: false }),
+ model: openai('gpt-4o'),
prompt: 'Explain the concept of quantum entanglement.',
+ providerOptions: {
+ openai: {
+ parallelToolCalls: false,
+ },
+ },
});
// Responses API
@@ -200,7 +206,7 @@ const { text } = await generateText({
Ready to get started? Here's how you can dive in:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the full capabilities of the AI SDK.
-2. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action and get inspired for your own projects.
-3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [sdk.vercel.ai/docs/guides](/docs/guides).
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the full capabilities of the AI SDK.
+2. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action and get inspired for your own projects.
+3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [ai-sdk.dev/docs/guides](/docs/guides).
4. Check out ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
diff --git a/content/docs/02-guides/20-sonnet-3-7.mdx b/content/cookbook/00-guides/20-sonnet-3-7.mdx
similarity index 89%
rename from content/docs/02-guides/20-sonnet-3-7.mdx
rename to content/cookbook/00-guides/20-sonnet-3-7.mdx
index 1f5c0650f3f8..ed648938ac02 100644
--- a/content/docs/02-guides/20-sonnet-3-7.mdx
+++ b/content/cookbook/00-guides/20-sonnet-3-7.mdx
@@ -1,6 +1,7 @@
---
title: Get started with Claude 3.7 Sonnet
description: Get started with Claude 3.7 Sonnet using the AI SDK.
+tags: ['getting-started']
---
# Get started with Claude 3.7 Sonnet
@@ -69,11 +70,11 @@ console.log(text); // text response
### Building Interactive Interfaces
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and Claude 3.7 Sonnet:
@@ -85,14 +86,14 @@ Then, create a route handler for the chat endpoint:
```tsx filename="app/api/chat/route.ts"
import { anthropic, AnthropicProviderOptions } from '@ai-sdk/anthropic';
-import { streamText } from 'ai';
+import { streamText, UIMessage, convertToModelMessages } from 'ai';
export async function POST(req: Request) {
- const { messages } = await req.json();
+ const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: anthropic('claude-3-7-sonnet-20250219'),
- messages,
+ messages: convertToModelMessages(messages),
providerOptions: {
anthropic: {
thinking: { type: 'enabled', budgetTokens: 12000 },
@@ -100,7 +101,7 @@ export async function POST(req: Request) {
},
});
- return result.toDataStreamResponse({
+ return result.toUIMessageStreamResponse({
sendReasoning: true,
});
}
@@ -108,7 +109,7 @@ export async function POST(req: Request) {
You can forward the model's reasoning tokens to the client with
- `sendReasoning: true` in the `toDataStreamResponse` method.
+ `sendReasoning: true` in the `toUIMessageStreamResponse` method.
Finally, update the root page (`app/page.tsx`) to use the `useChat` hook:
@@ -164,9 +165,9 @@ The useChat hook on your root page (`app/page.tsx`) will make a request to your
Ready to dive in? Here's how you can begin:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the capabilities of the AI SDK.
-2. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action.
-3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) at [sdk.vercel.ai/docs/guides](/docs/guides).
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the capabilities of the AI SDK.
+2. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action.
+3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) at [ai-sdk.dev/docs/guides](/docs/guides).
4. Use ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
Claude 3.7 Sonnet opens new opportunities for reasoning-intensive AI applications. Start building today and leverage the power of advanced reasoning in your AI projects.
diff --git a/content/docs/02-guides/21-llama-3_1.mdx b/content/cookbook/00-guides/21-llama-3_1.mdx
similarity index 94%
rename from content/docs/02-guides/21-llama-3_1.mdx
rename to content/cookbook/00-guides/21-llama-3_1.mdx
index 1e78c2913ba9..bd447d93c56d 100644
--- a/content/docs/02-guides/21-llama-3_1.mdx
+++ b/content/cookbook/00-guides/21-llama-3_1.mdx
@@ -1,6 +1,7 @@
---
title: Get started with Llama 3.1
description: Get started with Llama 3.1 using the AI SDK.
+tags: ['getting-started']
---
# Get started with Llama 3.1
@@ -42,9 +43,9 @@ The AI SDK abstracts away the differences between model providers, eliminates bo
At the center of the AI SDK is [AI SDK Core](/docs/ai-sdk-core/overview), which provides a unified API to call any LLM. The code snippet below is all you need to call Llama 3.1 (using [DeepInfra](https://deepinfra.com)) with the AI SDK:
-```tsx
-import { generateText } from 'ai';
+```ts
import { deepinfra } from '@ai-sdk/deepinfra';
+import { generateText } from 'ai';
const { text } = await generateText({
model: deepinfra('meta-llama/Meta-Llama-3.1-405B-Instruct'),
@@ -90,7 +91,7 @@ const { textStream } = streamText({
While text generation can be useful, you might want to generate structured JSON data. For example, you might want to extract information from text, classify data, or generate synthetic data. AI SDK Core provides two functions ([`generateObject`](/docs/reference/ai-sdk-core/generate-object) and [`streamObject`](/docs/reference/ai-sdk-core/stream-object)) to generate structured data, allowing you to constrain model outputs to a specific schema.
-```tsx
+```ts
import { generateObject } from 'ai';
import { deepinfra } from '@ai-sdk/deepinfra';
import { z } from 'zod';
@@ -120,16 +121,16 @@ The AI SDK supports tool usage across several of its functions, including [`gene
Here's an example of how you can use a tool with the AI SDK and Llama 3.1:
-```tsx
+```ts
import { generateText, tool } from 'ai';
import { deepinfra } from '@ai-sdk/deepinfra';
-import { getWeather } from './weatherTool';
+import { z } from 'zod';
const { text } = await generateText({
model: deepinfra('meta-llama/Meta-Llama-3.1-70B-Instruct'),
prompt: 'What is the weather like today?',
tools: {
- weather: tool({
+ getWeather: tool({
description: 'Get the weather in a location',
parameters: z.object({
location: z.string().describe('The location to get the weather for'),
@@ -186,17 +187,17 @@ In this example, the agent can use the calculator tool multiple times if needed,
### Building Interactive Interfaces
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and Llama 3.1 (via [DeepInfra](https://deepinfra.com)):
```tsx filename="app/api/chat/route.ts"
-import { streamText } from 'ai';
import { deepinfra } from '@ai-sdk/deepinfra';
+import { convertToModelMessages, streamText } from 'ai';
// Allow streaming responses up to 30 seconds
export const maxDuration = 30;
@@ -206,11 +207,10 @@ export async function POST(req: Request) {
const result = streamText({
model: deepinfra('meta-llama/Meta-Llama-3.1-70B-Instruct'),
- system: 'You are a helpful assistant.',
- messages,
+ messages: convertToModelMessages(messages),
});
- return result.toDataStreamResponse();
+ return result.toUIMessageStreamResponse();
}
```
@@ -254,7 +254,7 @@ First, create a Server Action.
```tsx filename="app/actions.tsx"
'use server';
-import { streamUI } from 'ai/rsc';
+import { streamUI } from '@ai-sdk/rsc';
import { deepinfra } from '@ai-sdk/deepinfra';
import { z } from 'zod';
@@ -327,7 +327,7 @@ import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
const { text } = await generateText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-4.1'),
prompt: 'What is love?',
});
```
@@ -347,7 +347,7 @@ const { text } = await generateText({
Thanks to the unified API, the core structure of the code remains the same. The main differences are:
1. Creating a DeepInfra client
-2. Changing the model name from `openai("gpt-4-turbo")` to `deepinfra("meta-llama/Meta-Llama-3.1-70B-Instruct")`.
+2. Changing the model name from `openai("gpt-4.1")` to `deepinfra("meta-llama/Meta-Llama-3.1-70B-Instruct")`.
With just these few changes, you've migrated from using OpenAI's GPT-4-Turbo to Meta's Llama 3.1 hosted on DeepInfra. The `generateText` function and its usage remain identical, showcasing the power of the AI SDK's unified API.
@@ -386,7 +386,7 @@ The AI SDK ensures that your application remains clean and modular, accelerating
Ready to get started? Here's how you can dive in:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the full capabilities of the AI SDK.
-2. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action and get inspired for your own projects.
-3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [sdk.vercel.ai/docs/guides](/docs/guides).
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the full capabilities of the AI SDK.
+2. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action and get inspired for your own projects.
+3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [ai-sdk.dev/docs/guides](/docs/guides).
4. Check out ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
diff --git a/content/docs/02-guides/22-gpt-4-5.mdx b/content/cookbook/00-guides/22-gpt-4-5.mdx
similarity index 87%
rename from content/docs/02-guides/22-gpt-4-5.mdx
rename to content/cookbook/00-guides/22-gpt-4-5.mdx
index db23cfed35dc..e3c0ff203559 100644
--- a/content/docs/02-guides/22-gpt-4-5.mdx
+++ b/content/cookbook/00-guides/22-gpt-4-5.mdx
@@ -1,6 +1,7 @@
---
-title: Get started with OpenAI GPT-4.5
-description: Get started with OpenAI GPT-4.5 using the AI SDK.
+title: Get started with GPT-4.5
+description: Get started with GPT-4.5 using the AI SDK.
+tags: ['getting-started']
---
# Get started with OpenAI GPT-4.5
@@ -80,6 +81,7 @@ GPT-4.5 supports tool calling out of the box, allowing it to interact with exter
```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
+import { z } from 'zod';
const { text } = await generateText({
model: openai('gpt-4.5-preview'),
@@ -103,36 +105,36 @@ In this example, the `getWeather` tool allows the model to fetch real-time weath
### Building Interactive Interfaces
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and OpenAI GPT-4.5:
In a new Next.js application, first install the AI SDK and the OpenAI provider:
-
+
Then, create a route handler for the chat endpoint:
```tsx filename="app/api/chat/route.ts"
import { openai } from '@ai-sdk/openai';
-import { streamText } from 'ai';
+import { convertToModelMessages, streamText, UIMessage } from 'ai';
// Allow responses up to 30 seconds
export const maxDuration = 30;
export async function POST(req: Request) {
- const { messages } = await req.json();
+ const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: openai('gpt-4.5-preview'),
- messages,
+ messages: convertToModelMessages(messages),
});
- return result.toDataStreamResponse();
+ return result.toUIMessageStreamResponse();
}
```
@@ -169,7 +171,7 @@ The useChat hook on your root page (`app/page.tsx`) will make a request to your
Ready to get started? Here's how you can dive in:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the full capabilities of the AI SDK.
-2. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action and get inspired for your own projects.
-3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [sdk.vercel.ai/docs/guides](/docs/guides).
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the full capabilities of the AI SDK.
+2. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action and get inspired for your own projects.
+3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [ai-sdk.dev/docs/guides](/docs/guides).
4. Check out ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
diff --git a/content/docs/02-guides/23-o1.mdx b/content/cookbook/00-guides/23-o1.mdx
similarity index 91%
rename from content/docs/02-guides/23-o1.mdx
rename to content/cookbook/00-guides/23-o1.mdx
index 675ea86997ae..5ef511ffc8f3 100644
--- a/content/docs/02-guides/23-o1.mdx
+++ b/content/cookbook/00-guides/23-o1.mdx
@@ -1,6 +1,7 @@
---
title: Get started with OpenAI o1
description: Get started with OpenAI o1 using the AI SDK.
+tags: ['getting-started', 'reasoning']
---
# Get started with OpenAI o1
@@ -52,7 +53,7 @@ The AI SDK abstracts away the differences between model providers, eliminates bo
At the center of the AI SDK is [AI SDK Core](/docs/ai-sdk-core/overview), which provides a unified API to call any LLM. The code snippet below is all you need to call OpenAI o1-mini with the AI SDK:
-```tsx
+```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
@@ -69,7 +70,7 @@ const { text } = await generateText({
AI SDK Core abstracts away the differences between model providers, allowing you to focus on building great applications. The unified interface also means that you can easily switch between models by changing just one line of code.
-```tsx highlight="5"
+```ts highlight="5"
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
@@ -88,7 +89,7 @@ const { text } = await generateText({
You can control the amount of reasoning effort expended by o1 through the `reasoningEffort` parameter.
This parameter can be set to `'low'`, `'medium'`, or `'high'` to adjust how much time and computation the model spends on internal reasoning before producing a response.
-```tsx highlight="9"
+```ts highlight="9"
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
@@ -111,7 +112,7 @@ const { text } = await generateText({
While text generation can be useful, you might want to generate structured JSON data. For example, you might want to extract information from text, classify data, or generate synthetic data. AI SDK Core provides two functions ([`generateObject`](/docs/reference/ai-sdk-core/generate-object) and [`streamObject`](/docs/reference/ai-sdk-core/stream-object)) to generate structured data, allowing you to constrain model outputs to a specific schema.
-```tsx
+```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';
@@ -146,9 +147,10 @@ The AI SDK supports tool usage across several of its functions, like [`generateT
Here's an example of how you can use a tool with the AI SDK and o1:
-```tsx
+```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
+import { z } from 'zod';
const { text } = await generateText({
model: openai('o1'),
@@ -174,30 +176,30 @@ In this example, the `getWeather` tool allows the model to fetch real-time weath
### Building Interactive Interfaces
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and OpenAI o1:
```tsx filename="app/api/chat/route.ts"
import { openai } from '@ai-sdk/openai';
-import { streamText } from 'ai';
+import { convertToModelMessages, streamText, UIMessage } from 'ai';
// Allow responses up to 5 minutes
export const maxDuration = 300;
export async function POST(req: Request) {
- const { messages } = await req.json();
+ const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: openai('o1-mini'),
- messages,
+ messages: convertToModelMessages(messages),
});
- return result.toDataStreamResponse();
+ return result.toUIMessageStreamResponse();
}
```
@@ -232,8 +234,8 @@ The useChat hook on your root page (`app/page.tsx`) will make a request to your
Ready to get started? Here's how you can dive in:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the full capabilities of the AI SDK.
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the full capabilities of the AI SDK.
1. Check out our support for the o1 series of reasoning models in the [OpenAI Provider](/providers/ai-sdk-providers/openai#reasoning-models).
-1. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action and get inspired for your own projects.
-1. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [sdk.vercel.ai/docs/guides](/docs/guides).
+1. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action and get inspired for your own projects.
+1. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [ai-sdk.dev/docs/guides](/docs/guides).
1. Check out ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
diff --git a/content/docs/02-guides/24-o3.mdx b/content/cookbook/00-guides/24-o3.mdx
similarity index 89%
rename from content/docs/02-guides/24-o3.mdx
rename to content/cookbook/00-guides/24-o3.mdx
index a8eba54d350d..1cf496d3041c 100644
--- a/content/docs/02-guides/24-o3.mdx
+++ b/content/cookbook/00-guides/24-o3.mdx
@@ -1,6 +1,7 @@
---
title: Get started with OpenAI o3-mini
description: Get started with OpenAI o3-mini using the AI SDK.
+tags: ['getting-started', 'reasoning']
---
# Get started with OpenAI o3-mini
@@ -52,7 +53,7 @@ The AI SDK abstracts away the differences between model providers, eliminates bo
At the center of the AI SDK is [AI SDK Core](/docs/ai-sdk-core/overview), which provides a unified API to call any LLM. The code snippet below is all you need to call OpenAI o3-mini with the AI SDK:
-```tsx
+```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
@@ -75,7 +76,7 @@ const { text } = await generateText({
You can control the amount of reasoning effort expended by o3-mini through the `reasoningEffort` parameter.
This parameter can be set to `low`, `medium`, or `high` to adjust how much time and computation the model spends on internal reasoning before producing a response.
-```tsx highlight="9"
+```ts highlight="9"
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
@@ -93,7 +94,7 @@ const { text } = await generateText({
While text generation can be useful, you might want to generate structured JSON data. For example, you might want to extract information from text, classify data, or generate synthetic data. AI SDK Core provides two functions ([`generateObject`](/docs/reference/ai-sdk-core/generate-object) and [`streamObject`](/docs/reference/ai-sdk-core/stream-object)) to generate structured data, allowing you to constrain model outputs to a specific schema.
-```tsx
+```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';
@@ -117,9 +118,10 @@ This code snippet will generate a type-safe recipe that conforms to the specifie
o3-mini supports tool calling out of the box, allowing it to interact with external systems and perform discrete tasks. Here's an example of using tool calling with the AI SDK:
-```tsx
+```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
+import { z } from 'zod';
const { text } = await generateText({
model: openai('o3-mini'),
@@ -143,36 +145,36 @@ In this example, the `getWeather` tool allows the model to fetch real-time weath
### Building Interactive Interfaces
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and OpenAI o3-mini:
In a new Next.js application, first install the AI SDK and the DeepSeek provider:
-
+
Then, create a route handler for the chat endpoint:
```tsx filename="app/api/chat/route.ts"
import { openai } from '@ai-sdk/openai';
-import { streamText } from 'ai';
+import { convertToModelMessages, streamText, UIMessage } from 'ai';
// Allow responses up to 5 minutes
export const maxDuration = 300;
export async function POST(req: Request) {
- const { messages } = await req.json();
+ const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: openai('o3-mini'),
- messages,
+ messages: convertToModelMessages(messages),
});
- return result.toDataStreamResponse();
+ return result.toUIMessageStreamResponse();
}
```
@@ -209,8 +211,8 @@ The useChat hook on your root page (`app/page.tsx`) will make a request to your
Ready to get started? Here's how you can dive in:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the full capabilities of the AI SDK.
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the full capabilities of the AI SDK.
2. Check out our support for o3-mini in the [OpenAI Provider](/providers/ai-sdk-providers/openai#reasoning-models).
-3. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action and get inspired for your own projects.
-4. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [sdk.vercel.ai/docs/guides](/docs/guides).
+3. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action and get inspired for your own projects.
+4. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) and multi-modal chat at [ai-sdk.dev/docs/guides](/docs/guides).
5. Check out ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
diff --git a/content/docs/02-guides/25-r1.mdx b/content/cookbook/00-guides/25-r1.mdx
similarity index 90%
rename from content/docs/02-guides/25-r1.mdx
rename to content/cookbook/00-guides/25-r1.mdx
index 6ccafaa089d1..c84267e2857a 100644
--- a/content/docs/02-guides/25-r1.mdx
+++ b/content/cookbook/00-guides/25-r1.mdx
@@ -1,11 +1,12 @@
---
title: Get started with DeepSeek R1
description: Get started with DeepSeek R1 using the AI SDK.
+tags: ['getting-started', 'reasoning']
---
# Get started with DeepSeek R1
-With the [release of DeepSeek R1](https://deepseek.ai/deepseek-r1), there has never been a better time to start building AI applications, particularly those that require complex reasoning capabilities.
+With the [release of DeepSeek R1](https://api-docs.deepseek.com/news/news250528), there has never been a better time to start building AI applications, particularly those that require complex reasoning capabilities.
The [AI SDK](/) is a powerful TypeScript toolkit for building AI applications with large language models (LLMs) like DeepSeek R1 alongside popular frameworks like React, Next.js, Vue, Svelte, Node.js, and more.
@@ -125,33 +126,33 @@ You can use DeepSeek R1 with the AI SDK through various providers. Here's a comp
### Building Interactive Interfaces
-AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, SvelteKit, and SolidStart.
+AI SDK Core can be paired with [AI SDK UI](/docs/ai-sdk-ui/overview), another powerful component of the AI SDK, to streamline the process of building chat, completion, and assistant interfaces with popular frameworks like Next.js, Nuxt, and SvelteKit.
AI SDK UI provides robust abstractions that simplify the complex tasks of managing chat streams and UI updates on the frontend, enabling you to develop dynamic AI-driven interfaces more efficiently.
-With four main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), [`useObject`](/docs/reference/ai-sdk-ui/use-object), and [`useAssistant`](/docs/reference/ai-sdk-ui/use-assistant) — you can incorporate real-time chat capabilities, text completions, streamed JSON, and interactive assistant features into your app.
+With three main hooks — [`useChat`](/docs/reference/ai-sdk-ui/use-chat), [`useCompletion`](/docs/reference/ai-sdk-ui/use-completion), and [`useObject`](/docs/reference/ai-sdk-ui/use-object) — you can incorporate real-time chat capabilities, text completions, and streamed JSON into your app.
Let's explore building a chatbot with [Next.js](https://nextjs.org), the AI SDK, and DeepSeek R1:
In a new Next.js application, first install the AI SDK and the DeepSeek provider:
-
+
Then, create a route handler for the chat endpoint:
```tsx filename="app/api/chat/route.ts"
import { deepseek } from '@ai-sdk/deepseek';
-import { streamText } from 'ai';
+import { convertToModelMessages, streamText, UIMessage } from 'ai';
export async function POST(req: Request) {
- const { messages } = await req.json();
+ const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: deepseek('deepseek-reasoner'),
- messages,
+ messages: convertToModelMessages(messages),
});
- return result.toDataStreamResponse({
+ return result.toUIMessageStreamResponse({
sendReasoning: true,
});
}
@@ -208,9 +209,9 @@ While DeepSeek R1 models are powerful, they have certain limitations:
Ready to dive in? Here's how you can begin:
-1. Explore the documentation at [sdk.vercel.ai/docs](/docs) to understand the capabilities of the AI SDK.
-2. Check out practical examples at [sdk.vercel.ai/examples](/examples) to see the SDK in action.
-3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) at [sdk.vercel.ai/docs/guides](/docs/guides).
+1. Explore the documentation at [ai-sdk.dev/docs](/docs) to understand the capabilities of the AI SDK.
+2. Check out practical examples at [ai-sdk.dev/examples](/examples) to see the SDK in action.
+3. Dive deeper with advanced guides on topics like Retrieval-Augmented Generation (RAG) at [ai-sdk.dev/docs/guides](/docs/guides).
4. Use ready-to-deploy AI templates at [vercel.com/templates?type=ai](https://vercel.com/templates?type=ai).
DeepSeek R1 opens new opportunities for reasoning-intensive AI applications. Start building today and leverage the power of advanced reasoning in your AI projects.
diff --git a/content/docs/02-guides/index.mdx b/content/cookbook/00-guides/index.mdx
similarity index 100%
rename from content/docs/02-guides/index.mdx
rename to content/cookbook/00-guides/index.mdx
diff --git a/content/cookbook/01-next/23-chat-with-pdf.mdx b/content/cookbook/01-next/23-chat-with-pdf.mdx
index 15deb7a3bf9f..71a212688cc4 100644
--- a/content/cookbook/01-next/23-chat-with-pdf.mdx
+++ b/content/cookbook/01-next/23-chat-with-pdf.mdx
@@ -10,8 +10,7 @@ Some language models like Anthropic's Claude Sonnet 3.5 and Google's Gemini 2.0
This example requires a provider that supports PDFs, such as Anthropic's
- Claude Sonnet 3.5 or Google's Gemini 2.0. Note OpenAI's GPT-4o does not
- currently support PDFs. Check the [provider
+ Claude 3.7, Google's Gemini 2.5, or OpenAI's GPT-4.1. Check the [provider
documentation](/providers/ai-sdk-providers) for up-to-date support
information.
diff --git a/content/cookbook/01-next/24-stream-text-multistep.mdx b/content/cookbook/01-next/24-stream-text-multistep.mdx
index fec93204aa78..bee5fac89d5f 100644
--- a/content/cookbook/01-next/24-stream-text-multistep.mdx
+++ b/content/cookbook/01-next/24-stream-text-multistep.mdx
@@ -54,7 +54,10 @@ export async function POST(req: Request) {
system:
'You are a helpful assistant with a different system prompt. Repeat the extract user goal in your answer.',
// continue the workflow stream with the messages from the previous step:
- messages: [...messages, ...(await result1.response).messages],
+ messages: [
+ ...convertToCoreMessages(messages),
+ ...(await result1.response).messages,
+ ],
});
// forward the 2nd result to the client (incl. the finish event):
diff --git a/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx b/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx
index 34c3eba7a223..be9f3aaed399 100644
--- a/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx
+++ b/content/cookbook/01-next/31-generate-object-with-file-prompt.mdx
@@ -94,7 +94,7 @@ export async function POST(request: Request) {
},
],
schema: z.object({
- summary: z.string().describe('A 50 word sumamry of the PDF.'),
+ summary: z.string().describe('A 50 word summary of the PDF.'),
}),
});
diff --git a/content/cookbook/01-next/73-mcp-tools.mdx b/content/cookbook/01-next/73-mcp-tools.mdx
index 36a58173a173..13570ea2fe7b 100644
--- a/content/cookbook/01-next/73-mcp-tools.mdx
+++ b/content/cookbook/01-next/73-mcp-tools.mdx
@@ -12,10 +12,15 @@ The AI SDK supports Model Context Protocol (MCP) tools by offering a lightweight
Let's create a route handler for `/api/completion` that will generate text based on the input prompt and MCP tools that can be called at any time during a generation. The route will call the `streamText` function from the `ai` module, which will then generate text based on the input prompt and stream it to the client.
+To use the `StreamableHTTPClientTransport`, you will need to install the official TypeScript SDK for the Model Context Protocol:
+
+
+
```ts filename="app/api/completion/route.ts"
import { experimental_createMCPClient, streamText } from 'ai';
import { Experimental_StdioMCPTransport } from 'ai/mcp-stdio';
import { openai } from '@ai-sdk/openai';
+import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp';
export async function POST(req: Request) {
const { prompt }: { prompt: string } = await req.json();
@@ -38,17 +43,17 @@ export async function POST(req: Request) {
},
});
- // Similarly to the stdio example, you can pass in your own custom transport as long as it implements the `MCPTransport` interface:
- const transport = new MyCustomTransport({
- // ...
- });
- const customTransportClient = await experimental_createMCPClient({
+ // Similarly to the stdio example, you can pass in your own custom transport as long as it implements the `MCPTransport` interface (e.g. `StreamableHTTPClientTransport`):
+ const transport = new StreamableHTTPClientTransport(
+ new URL('http://localhost:3000/mcp'),
+ );
+ const customClient = await experimental_createMCPClient({
transport,
});
const toolSetOne = await stdioClient.tools();
const toolSetTwo = await sseClient.tools();
- const toolSetThree = await customTransportClient.tools();
+ const toolSetThree = await customClient.tools();
const tools = {
...toolSetOne,
...toolSetTwo,
@@ -63,7 +68,15 @@ export async function POST(req: Request) {
onFinish: async () => {
await stdioClient.close();
await sseClient.close();
- await customTransportClient.close();
+ await customClient.close();
+ },
+ // Closing clients onError is optional
+ // - Closing: Immediately frees resources, prevents hanging connections
+ // - Not closing: Keeps connection open for retries
+ onError: async error => {
+ await stdioClient.close();
+ await sseClient.close();
+ await customClient.close();
},
});
diff --git a/content/cookbook/05-node/51-call-tools-in-parallel.mdx b/content/cookbook/05-node/51-call-tools-in-parallel.mdx
index 960884645d7c..e1b9cb8585ac 100644
--- a/content/cookbook/05-node/51-call-tools-in-parallel.mdx
+++ b/content/cookbook/05-node/51-call-tools-in-parallel.mdx
@@ -1,5 +1,5 @@
---
-title: Call Tools in Parallels
+title: Call Tools in Parallel
description: Learn how to call tools in parallel using the AI SDK and Node
tags: ['node', 'tool use']
---
diff --git a/content/cookbook/05-node/80-local-caching-middleware.mdx b/content/cookbook/05-node/80-local-caching-middleware.mdx
new file mode 100644
index 000000000000..e2a95a51287d
--- /dev/null
+++ b/content/cookbook/05-node/80-local-caching-middleware.mdx
@@ -0,0 +1,242 @@
+---
+title: Local Caching Middleware
+description: Learn how to create a caching middleware for local development.
+tags: ['streaming', 'caching', 'middleware']
+---
+
+# Local Caching Middleware
+
+When developing AI applications, you'll often find yourself repeatedly making the same API calls during development. This can lead to increased costs and slower development cycles. A caching middleware allows you to store responses locally and reuse them when the same inputs are provided.
+
+This approach is particularly useful in two scenarios:
+
+1. **Iterating on UI/UX** - When you're focused on styling and user experience, you don't want to regenerate AI responses for every code change.
+2. **Working on evals** - When developing evals, you need to repeatedly test the same prompts, but don't need new generations each time.
+
+## Implementation
+
+In this implementation, you create a JSON file to store responses. When a request is made, you first check if you have already seen this exact request. If you have, you return the cached response immediately (as a one-off generation or chunks of tokens). If not, you trigger the generation, save the response, and return it.
+
+
+ Make sure to add the path of your local cache to your `.gitignore` so you do
+ not commit it.
+
+
+### How it works
+
+For regular generations, you store and retrieve complete responses. In contrast, the streaming implementation captures each token as it arrives, stores the full sequence, and on cache hits uses the SDK's `simulateReadableStream` utility to recreate the token-by-token streaming experience at a controlled speed (defaults to 10ms between chunks).
+
+This approach gives you the best of both worlds:
+
+- Instant responses for repeated queries
+- Preserved streaming behavior for UI development
+
+The middleware handles all transformations needed to make cached responses indistinguishable from fresh ones, including normalizing tool calls and fixing timestamp formats.
+
+### Middleware
+
+```ts
+import {
+ type LanguageModelV1,
+ type LanguageModelV1Middleware,
+ LanguageModelV1Prompt,
+ type LanguageModelV1StreamPart,
+ simulateReadableStream,
+ wrapLanguageModel,
+} from 'ai';
+import 'dotenv/config';
+import fs from 'fs';
+import path from 'path';
+
+const CACHE_FILE = path.join(process.cwd(), '.cache/ai-cache.json');
+
+export const cached = (model: LanguageModelV1) =>
+ wrapLanguageModel({
+ middleware: cacheMiddleware,
+ model,
+ });
+
+const ensureCacheFile = () => {
+ const cacheDir = path.dirname(CACHE_FILE);
+ if (!fs.existsSync(cacheDir)) {
+ fs.mkdirSync(cacheDir, { recursive: true });
+ }
+ if (!fs.existsSync(CACHE_FILE)) {
+ fs.writeFileSync(CACHE_FILE, '{}');
+ }
+};
+
+const getCachedResult = (key: string | object) => {
+ ensureCacheFile();
+ const cacheKey = typeof key === 'object' ? JSON.stringify(key) : key;
+ try {
+ const cacheContent = fs.readFileSync(CACHE_FILE, 'utf-8');
+
+ const cache = JSON.parse(cacheContent);
+
+ const result = cache[cacheKey];
+
+ return result ?? null;
+ } catch (error) {
+ console.error('Cache error:', error);
+ return null;
+ }
+};
+
+const updateCache = (key: string, value: any) => {
+ ensureCacheFile();
+ try {
+ const cache = JSON.parse(fs.readFileSync(CACHE_FILE, 'utf-8'));
+ const updatedCache = { ...cache, [key]: value };
+ fs.writeFileSync(CACHE_FILE, JSON.stringify(updatedCache, null, 2));
+ console.log('Cache updated for key:', key);
+ } catch (error) {
+ console.error('Failed to update cache:', error);
+ }
+};
+const cleanPrompt = (prompt: LanguageModelV1Prompt) => {
+ return prompt.map(m => {
+ if (m.role === 'assistant') {
+ return m.content.map(part =>
+ part.type === 'tool-call' ? { ...part, toolCallId: 'cached' } : part,
+ );
+ }
+ if (m.role === 'tool') {
+ return m.content.map(tc => ({
+ ...tc,
+ toolCallId: 'cached',
+ result: {},
+ }));
+ }
+
+ return m;
+ });
+};
+
+export const cacheMiddleware: LanguageModelV1Middleware = {
+ wrapGenerate: async ({ doGenerate, params }) => {
+ const cacheKey = JSON.stringify({
+ ...cleanPrompt(params.prompt),
+ _function: 'generate',
+ });
+ console.log('Cache Key:', cacheKey);
+
+ const cached = getCachedResult(cacheKey) as Awaited<
+ ReturnType<LanguageModelV1['doGenerate']>
+ > | null;
+
+ if (cached && cached !== null) {
+ console.log('Cache Hit');
+ return {
+ ...cached,
+ response: {
+ ...cached.response,
+ timestamp: cached?.response?.timestamp
+ ? new Date(cached?.response?.timestamp)
+ : undefined,
+ },
+ };
+ }
+
+ console.log('Cache Miss');
+ const result = await doGenerate();
+
+ updateCache(cacheKey, result);
+
+ return result;
+ },
+ wrapStream: async ({ doStream, params }) => {
+ const cacheKey = JSON.stringify({
+ ...cleanPrompt(params.prompt),
+ _function: 'stream',
+ });
+ console.log('Cache Key:', cacheKey);
+
+ // Check if the result is in the cache
+ const cached = getCachedResult(cacheKey);
+
+ // If cached, return a simulated ReadableStream that yields the cached result
+ if (cached && cached !== null) {
+ console.log('Cache Hit');
+ // Format the timestamps in the cached response
+ const formattedChunks = (cached as LanguageModelV1StreamPart[]).map(p => {
+ if (p.type === 'response-metadata' && p.timestamp) {
+ return { ...p, timestamp: new Date(p.timestamp) };
+ } else return p;
+ });
+ return {
+ stream: simulateReadableStream({
+ initialDelayInMs: 0,
+ chunkDelayInMs: 10,
+ chunks: formattedChunks,
+ }),
+ rawCall: { rawPrompt: null, rawSettings: {} },
+ };
+ }
+
+ console.log('Cache Miss');
+ // If not cached, proceed with streaming
+ const { stream, ...rest } = await doStream();
+
+ const fullResponse: LanguageModelV1StreamPart[] = [];
+
+ const transformStream = new TransformStream<
+ LanguageModelV1StreamPart,
+ LanguageModelV1StreamPart
+ >({
+ transform(chunk, controller) {
+ fullResponse.push(chunk);
+ controller.enqueue(chunk);
+ },
+ flush() {
+ // Store the full response in the cache after streaming is complete
+ updateCache(cacheKey, fullResponse);
+ },
+ });
+
+ return {
+ stream: stream.pipeThrough(transformStream),
+ ...rest,
+ };
+ },
+};
+```
+
+## Using the Middleware
+
+The middleware can be easily integrated into your existing AI SDK setup:
+
+```ts highlight="4,8"
+import { openai } from '@ai-sdk/openai';
+import { streamText } from 'ai';
+import 'dotenv/config';
+import { cached } from '../middleware/your-cache-middleware';
+
+async function main() {
+ const result = streamText({
+ model: cached(openai('gpt-4o')),
+ maxTokens: 512,
+ temperature: 0.3,
+ maxRetries: 5,
+ prompt: 'Invent a new holiday and describe its traditions.',
+ });
+
+ for await (const textPart of result.textStream) {
+ process.stdout.write(textPart);
+ }
+
+ console.log();
+ console.log('Token usage:', await result.usage);
+ console.log('Finish reason:', await result.finishReason);
+}
+
+main().catch(console.error);
+```
+
+## Considerations
+
+When using this caching middleware, keep these points in mind:
+
+1. **Development Only** - This approach is intended for local development, not production environments
+2. **Cache Invalidation** - You'll need to clear the cache (delete the cache file) when you want fresh responses
+3. **Multi-Step Flows** - When using `maxSteps`, be aware that caching occurs at the individual language model response level, not across the entire execution flow. This means that while the model's generation is cached, the tool call is not and will run on each generation.
diff --git a/content/docs/01-introduction/index.mdx b/content/docs/00-introduction/index.mdx
similarity index 72%
rename from content/docs/01-introduction/index.mdx
rename to content/docs/00-introduction/index.mdx
index b53909739d58..daa2ae80c2f8 100644
--- a/content/docs/01-introduction/index.mdx
+++ b/content/docs/00-introduction/index.mdx
@@ -11,6 +11,14 @@ The AI SDK is the TypeScript toolkit designed to help developers build AI-powere
Integrating large language models (LLMs) into applications is complicated and heavily dependent on the specific model provider you use.
+The AI SDK standardizes integrating artificial intelligence (AI) models across [supported providers](/docs/foundations/providers-and-models). This enables developers to focus on building great AI applications, not waste time on technical details.
+
+For example, here’s how you can generate text with various models using the AI SDK:
+
+
+
+The AI SDK has two main libraries:
+
- **[AI SDK Core](/docs/ai-sdk-core):** A unified API for generating text, structured objects, tool calls, and building agents with LLMs.
- **[AI SDK UI](/docs/ai-sdk-ui):** A set of framework-agnostic hooks for quickly building chat and generative user interface.
@@ -48,15 +56,15 @@ We've built some [templates](https://vercel.com/templates?type=ai) that include
If you have questions about anything related to the AI SDK, you're always welcome to ask our community on [GitHub Discussions](https://github.com/vercel/ai/discussions).
-## `llms.txt`
+## `llms.txt` (for Cursor, Windsurf, Copilot, Claude, etc.)
-You can access the entire AI SDK documentation in Markdown format at [sdk.vercel.ai/llms.txt](/llms.txt). This can be used to ask any LLM (assuming it has a big enough context window) questions about the AI SDK based on the most up-to-date documentation.
+You can access the entire AI SDK documentation in Markdown format at [ai-sdk.dev/llms.txt](/llms.txt). This can be used to ask any LLM (assuming it has a big enough context window) questions about the AI SDK based on the most up-to-date documentation.
### Example Usage
For instance, to prompt an LLM with questions about the AI SDK:
-1. Copy the documentation contents from [sdk.vercel.ai/llms.txt](/llms.txt)
+1. Copy the documentation contents from [ai-sdk.dev/llms.txt](/llms.txt)
2. Use the following prompt format:
```prompt
diff --git a/content/docs/01-announcing-ai-sdk-5-beta/index.mdx b/content/docs/01-announcing-ai-sdk-5-beta/index.mdx
new file mode 100644
index 000000000000..cd81279ed154
--- /dev/null
+++ b/content/docs/01-announcing-ai-sdk-5-beta/index.mdx
@@ -0,0 +1,535 @@
+---
+title: AI SDK 5 Beta
+description: Get started with the Beta version of AI SDK 5.
+---
+
+# Announcing AI SDK 5 Beta
+
+<Note type="warning">
+  AI SDK 5 is in beta — while more stable than alpha, APIs may still change. Pin
+  to specific versions as breaking changes may occur in minor releases.
+</Note>
+
+## Beta Version Guidance
+
+The AI SDK 5 Beta is intended for:
+
+- **New projects** where you can adopt the latest patterns from the start
+- **Trying out new features** and giving us feedback on the developer experience
+- **Experimenting with migrations** from v4 to understand the upgrade path
+- **Development and testing environments** where you can iterate quickly
+
+**Short on time?** Wait for the stable release. We're focusing on polish and migration tooling improvements.
+
+**For production applications**: Experiment with migrations in development, but avoid fully migrating production systems. Use this beta period to understand the changes and prepare your migration strategy.
+
+## What to Expect in Beta
+
+- **No major breaking changes** - the architecture is stable
+- **Minor breaking changes possible** - we may refine APIs for critical bugfixes
+- **Bug fixes and DX improvements** - active development continues
+
+Your feedback during this beta phase directly shapes the final stable release. Share your experiences through [GitHub issues](https://github.com/vercel/ai/issues/new/choose).
+
+<Note type="warning">
+  While more stable than alpha, you may still encounter bugs in this Beta
+  release. To help us improve the SDK, [file bug reports on
+  GitHub](https://github.com/vercel/ai/issues/new/choose). Your reports directly
+  contribute to making the final release more stable and reliable.
+</Note>
+
+## Installation
+
+To install the AI SDK 5 Beta, run the following command:
+
+```bash
+# replace with your provider and framework
+npm install ai@beta @ai-sdk/openai@beta @ai-sdk/react@beta
+```
+
+<Note type="warning">
+  APIs may still change during beta. Pin to specific versions as breaking
+  changes may occur in minor releases.
+</Note>
+
+## What's new in AI SDK 5?
+
+AI SDK 5 is a redesign of the AI SDK's protocol and architecture based on everything we learned over the last two years of real-world usage. We also modernized the UI and protocols that have remained largely unchanged since AI SDK v2/3, to create a strong foundation for the future.
+
+### Why a new specification (LanguageModelV2)?
+
+When we originally designed the v1 protocol over a year ago, the standard interaction pattern with language models was text in, text or tool call out. Today's LLMs go beyond text and tool calls, generating reasoning, sources, images and more. New use cases like computer-using agents introduce a fundamentally different approach to interacting with language models that made it impossible to support in a unified approach with our original architecture.
+
+We needed a protocol designed for this new reality. While this is a breaking change that we take seriously, it provided an opportunity to rebuild the foundation and add new features.
+
+## New Features
+
+- [**`LanguageModelV2`**](#languagemodelv2) - new redesigned architecture
+- [**Message Overhaul**](#message-overhaul) - new `UIMessage` and `ModelMessage` types
+- [**Server-Sent Events (SSE)**](#server-sent-events-sse) - new standardized protocol for sending UI messages to the client
+- [**Agentic Control**](#agentic-control) - new primitives for building agentic systems
+- [**Enhanced useChat Architecture**](#enhanced-usechat-architecture) - improved state management with transport system
+
+## `LanguageModelV2`
+
+`LanguageModelV2` represents a complete redesign of how the AI SDK communicates with language models, adapting to the increasingly complex outputs modern AI systems generate. The new `LanguageModelV2` treats all LLM outputs as content parts, enabling consistent handling of text, images, reasoning, sources, and other response types. It has:
+
+- **Content-First Design** - Rather than separating text, reasoning, and tool calls, everything is represented as ordered content parts in a unified array
+- **Improved Type Safety** - The new `LanguageModelV2` provides better TypeScript type guarantees, making it easier to work with different content types
+- **Extensibility** - Adding support for new model capabilities requires no changes to the core structure
+
+## Message Overhaul
+
+AI SDK 5 introduces a completely redesigned message system with two message types that address the dual needs of what you render in your UI and what you send to the model. Context is crucial for effective language model generations, and these message types serve distinct purposes:
+
+- **UIMessage** represents the complete conversation history for your interface, preserving all message parts (text, images, data), metadata (creation timestamps, generation times), and UI state.
+
+- **ModelMessage** is optimized for sending to language models, considering token input constraints. It strips away UI-specific metadata and irrelevant content.
+
+With this change, you must explicitly convert your `UIMessage`s to `ModelMessage`s before sending them to the model.
+
+```ts highlight="9"
+import { openai } from '@ai-sdk/openai';
+import { convertToModelMessages, streamText, UIMessage } from 'ai';
+
+export async function POST(req: Request) {
+ const { messages }: { messages: UIMessage[] } = await req.json();
+
+ const result = streamText({
+ model: openai('gpt-4o'),
+ messages: convertToModelMessages(messages),
+ });
+
+ return result.toUIMessageStreamResponse();
+}
+```
+
+<Note>
+  This separation is essential as you can't use a single message format for both
+  purposes. Always save state in the `UIMessage` format to prevent information
+  loss, with explicit conversion to `ModelMessage` when communicating with
+  language models.
+</Note>
+
+The new message system makes several highly requested features possible:
+
+- **Type-safe Message Metadata** - Add structured information per message
+- **Type-safe Tool Calls** - Improved type safety when defining and using tools in your messages
+- **New Stream Writer** - Stream any part type (reasoning, sources, etc.) retaining proper order
+- **Data Parts** - Stream type-safe arbitrary data parts for dynamic UI components
+
+### Type-safe Tool Calls
+
+AI SDK 5 introduces type-safe tool calls in UI messages. Instead of generic `tool-invocation` types, tool parts use specific naming: `tool-${toolName}`. This provides better type safety and makes it easier to handle many tools in your UI.
+
+```tsx filename="AI SDK 4.0"
+// Generic tool-invocation type
+{
+ message.parts.map(part => {
+ if (part.type === 'tool-invocation') {
+      return <div>{part.toolInvocation.toolName}</div>;
+ }
+ });
+}
+```
+
+```tsx filename="AI SDK 5.0"
+// Type-safe tool parts with specific names
+{
+ message.parts.map(part => {
+ switch (part.type) {
+ case 'tool-getWeatherInformation':
+      return <div>Getting weather...</div>;
+    case 'tool-askForConfirmation':
+      return <div>Asking for confirmation...</div>;
+ }
+ });
+}
+```
+
+### Message metadata
+
+Metadata allows you to attach structured information to individual messages, making it easier to track details like response time, token usage, or model specifications. This information can enhance your UI with contextual data without embedding it in the message content itself.
+
+To add metadata to a message, first define the metadata schema:
+
+```ts filename="app/api/chat/example-metadata-schema.ts"
+import { z } from 'zod';
+
+export const exampleMetadataSchema = z.object({
+ duration: z.number().optional(),
+ model: z.string().optional(),
+ totalTokens: z.number().optional(),
+});
+
+export type ExampleMetadata = z.infer<typeof exampleMetadataSchema>;
+```
+
+Then add the metadata using the `message.metadata` property on the `toUIMessageStreamResponse()` utility:
+
+```ts filename="app/api/chat/route.ts"
+import { openai } from '@ai-sdk/openai';
+import { convertToModelMessages, streamText, UIMessage } from 'ai';
+import { ExampleMetadata } from './example-metadata-schema';
+
+export async function POST(req: Request) {
+ const { messages }: { messages: UIMessage[] } = await req.json();
+
+ const startTime = Date.now();
+ const result = streamText({
+ model: openai('gpt-4o'),
+    messages: convertToModelMessages(messages),
+ });
+
+ return result.toUIMessageStreamResponse({
+ messageMetadata: ({ part }): ExampleMetadata | undefined => {
+ // send custom information to the client on start:
+ if (part.type === 'start') {
+ return {
+ model: 'gpt-4o', // initial model id
+ };
+ }
+
+ // send additional model information on finish-step:
+ if (part.type === 'finish-step') {
+ return {
+ model: part.response.modelId, // update with the actual model id
+ duration: Date.now() - startTime,
+ };
+ }
+
+ // when the message is finished, send additional information:
+ if (part.type === 'finish') {
+ return {
+ totalTokens: part.totalUsage.totalTokens,
+ };
+ }
+ },
+ });
+}
+```
+
+Finally, use the metadata type with useChat and render the (type-safe) metadata in your UI:
+
+```tsx filename="app/page.tsx"
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport, UIMessage } from 'ai';
+import { ExampleMetadata } from './api/chat/example-metadata-schema';
+
+type MyMessage = UIMessage<ExampleMetadata>;
+
+export default function Chat() {
+  const { messages } = useChat<MyMessage>({
+ transport: new DefaultChatTransport({
+ api: '/api/chat',
+ }),
+ });
+
+ return (
+    <div>
+      {messages.map(message => (
+        <div key={message.id}>
+          {message.role === 'user' ? 'User: ' : 'AI: '}
+          {message.metadata?.duration && (
+            <div>Duration: {message.metadata.duration}ms</div>
+          )}
+          {message.metadata?.model && (
+            <div>Model: {message.metadata.model}</div>
+          )}
+          {message.metadata?.totalTokens && (
+            <div>Total tokens: {message.metadata.totalTokens}</div>
+          )}
+        </div>
+      ))}
+    </div>
+ );
+}
+```
+
+### UIMessageStream
+
+The UI Message Stream enables streaming any content parts from the server to the client. With this stream, you can send structured data like custom sources from your RAG pipeline directly to your UI. The stream writer is a utility that makes it easy to write to this message stream.
+
+```ts
+const stream = createUIMessageStream({
+ execute: writer => {
+ // stream custom sources
+ writer.write({
+ type: 'source',
+ value: {
+ type: 'source',
+ sourceType: 'url',
+ id: 'source-1',
+ url: 'https://example.com',
+ title: 'Example Source',
+ },
+ });
+ },
+});
+```
+
+On the client, these will be added to the ordered `message.parts` array.
+
+### Data Parts
+
+The new stream writer enables a type-safe way to stream arbitrary data from the server to the client and display it in your UI.
+
+You can create and stream custom data parts on the server:
+
+```tsx
+// On the server
+const stream = createUIMessageStream({
+ execute: writer => {
+ // Initial update
+ writer.write({
+ type: 'data-weather', // Custom type
+ id: toolCallId, // ID for updates
+ data: { city, status: 'loading' }, // Your data
+ });
+
+ // Later, update the same part
+ writer.write({
+ type: 'data-weather',
+ id: toolCallId,
+ data: { city, weather, status: 'success' },
+ });
+ },
+});
+```
+
+On the client, you can render these parts with full type safety:
+
+```tsx
+{
+ message.parts
+ .filter(part => part.type === 'data-weather') // type-safe
+ .map((part, index) => (
+      <Weather key={index} {...part.data} />
+    ));
+}
+```
+
+Data parts appear in the `message.parts` array along with other content, maintaining the proper ordering of the conversation. You can update parts by referencing the same ID, enabling dynamic experiences like collaborative artifacts.
+
+## Enhanced useChat Architecture
+
+AI SDK 5 introduces a new `useChat` architecture with transport-based configuration. This design makes state management and API integration flexible, allowing you to configure backend protocols without rewriting application logic.
+
+The new `useChat` hook uses a transport system for better modularity:
+
+- **Transport Configuration** – configure API endpoints and request handling through transport objects
+- **Enhanced State Management** – improved message handling with the new UIMessage format
+- **Type Safety** – stronger TypeScript support throughout the chat lifecycle
+
+Configure useChat with the transport system:
+
+```ts
+import { useChat } from '@ai-sdk/react';
+import { DefaultChatTransport } from 'ai';
+
+const { messages, sendMessage } = useChat({
+ transport: new DefaultChatTransport({
+ api: '/api/chat', // your chat endpoint
+ headers: { 'Custom-Header': 'value' },
+ }),
+ maxSteps: 5,
+});
+```
+
+## Server-Sent Events (SSE)
+
+AI SDK 5 uses Server-Sent Events (SSE) instead of a custom streaming protocol. SSE is a common web standard for sending data from servers to browsers. This switch has several advantages:
+
+- **Works everywhere** - Uses technology that works in all major browsers and platforms
+- **Easier to troubleshoot** - See the data stream in browser developer tools
+- **Simpler to build upon** - Adding new features is more straightforward
+- **More stable** - Built on proven technology that many developers already use
+
+## Agentic Control
+
+AI SDK 5 introduces new features for building agents that help you control model behavior more precisely.
+
+### prepareStep
+
+The `prepareStep` function gives you fine-grained control over each step in a multi-step agent. It's called before a step starts and allows you to:
+
+- Dynamically change the model used for specific steps
+- Force specific tool selections for particular steps
+- Limit which tools are available during specific steps
+- Examine the context of previous steps before proceeding
+
+```ts
+const result = await generateText({
+ // ...
+ experimental_prepareStep: async ({ model, stepNumber, maxSteps, steps }) => {
+ if (stepNumber === 0) {
+ return {
+ // use a different model for this step:
+ model: modelForThisParticularStep,
+ // force a tool choice for this step:
+ toolChoice: { type: 'tool', toolName: 'tool1' },
+ // limit the tools that are available for this step:
+ experimental_activeTools: ['tool1'],
+ };
+ }
+ // when nothing is returned, the default settings are used
+ },
+});
+```
+
+This makes it easier to build AI systems that adapt their capabilities based on context and task requirements.
+
+### `stopWhen`
+
+The `stopWhen` parameter lets you define stopping conditions for your agent. Instead of running indefinitely, you can specify exactly when the agent should terminate based on various conditions:
+
+- Reaching a maximum number of steps
+- Calling a specific tool
+- Satisfying any custom condition you define
+
+```ts
+const result = generateText({
+ // ...
+ // stop loop at 5 steps
+ stopWhen: stepCountIs(5),
+});
+
+const result = generateText({
+ // ...
+ // stop loop when weather tool called
+ stopWhen: hasToolCall('weather'),
+});
+
+const result = generateText({
+ // ...
+ // stop loop at your own custom condition
+ stopWhen: maxTotalTokens(20000),
+});
+```
+
+These agentic controls form the foundation for building reliable, controllable AI systems that tackle complex problems while remaining within well-defined constraints.
+
+## Additional New Features
+
+### Tool Output Schema
+
+Tools can now optionally specify an output schema for better type inference and validation:
+
+```tsx filename="AI SDK 5.0"
+import { tool } from 'ai';
+import { z } from 'zod';
+
+const weatherTool = tool({
+ description: 'Get weather information',
+ inputSchema: z.object({
+ city: z.string(),
+ }),
+ outputSchema: z.object({
+ temperature: z.number(),
+ conditions: z.string(),
+ }),
+ execute: async ({ city }) => ({
+ temperature: 72,
+ conditions: 'sunny',
+ }),
+});
+```
+
+### Tool Type Inference Helpers
+
+New utility types simplify working with tool types:
+
+```tsx filename="AI SDK 5.0"
+import { InferToolInput, InferToolOutput, InferUITool } from 'ai';
+import { weatherTool } from './weatherTool';
+
+// Infer input and output types from tool definitions
+type WeatherInput = InferToolInput<typeof weatherTool>;
+type WeatherOutput = InferToolOutput<typeof weatherTool>;
+type WeatherUITool = InferUITool<typeof weatherTool>;
+
+// Use in UI message type definitions
+type MyUIMessage = UIMessage<
+ never, // metadata type
+ UIDataTypes, // data parts type
+ {
+ weather: WeatherUITool;
+ }
+>;
+```
+
+### OpenAI Provider-Executed Tools
+
+New built-in tools for OpenAI:
+
+```tsx filename="AI SDK 5.0"
+import { openai } from '@ai-sdk/openai';
+
+const result = await generateText({
+ model: openai('gpt-4.1'),
+ tools: {
+ file_search: openai.tools.fileSearch(),
+ web_search_preview: openai.tools.webSearchPreview({
+ searchContextSize: 'high',
+ }),
+ },
+ messages,
+});
+```
+
+Available tools:
+
+- **`fileSearch`**: Search through uploaded documents using OpenAI's file search
+- **`webSearchPreview`**: Web search capabilities (preview feature)
+
+When using provider-defined tools like `fileSearch` and `webSearchPreview`, the tool execution results are automatically added to the message history, providing context for subsequent interactions.
+
+This automatic message history inclusion ensures that:
+
+- Tool execution context is preserved across conversation turns
+- Follow-up questions can reference previously searched information
+- The full conversation flow is maintained for debugging and logging
+
+### Enhanced Tool Streaming
+
+Tools now support fine-grained streaming callbacks:
+
+```tsx filename="AI SDK 5.0"
+const weatherTool = tool({
+ inputSchema: z.object({ city: z.string() }),
+ onInputStart: ({ toolCallId }) => {
+ console.log('Tool input streaming started:', toolCallId);
+ },
+ onInputDelta: ({ inputTextDelta, toolCallId }) => {
+ console.log('Tool input delta:', inputTextDelta);
+ },
+ onInputAvailable: ({ input, toolCallId }) => {
+ console.log('Tool input ready:', input);
+ },
+ execute: async ({ city }) => {
+ return `Weather in ${city}: sunny, 72°F`;
+ },
+});
+```
+
+## Migration from AI SDK 4.x
+
+Ready to upgrade from AI SDK 4.x to 5.0 Beta? We created a comprehensive migration guide to help you through the process.
+
+The migration involves several key changes:
+
+- Updated message format with `UIMessage` and `ModelMessage` types
+- New `useChat` architecture with transport system
+- New streaming protocol with Server-Sent Events
+- Improved type safety and developer experience
+
+**[View the complete Migration Guide →](/docs/migration-guides/migration-guide-5-0)**
+
+The migration guide includes:
+
+- Step-by-step upgrade instructions
+- Detailed examples for each breaking change
+- Best practices for adopting new features
diff --git a/content/docs/02-foundations/02-providers-and-models.mdx b/content/docs/02-foundations/02-providers-and-models.mdx
index a1902d339608..445666b0e399 100644
--- a/content/docs/02-foundations/02-providers-and-models.mdx
+++ b/content/docs/02-foundations/02-providers-and-models.mdx
@@ -25,6 +25,7 @@ Here is an overview of the AI SDK Provider Architecture:
The AI SDK comes with a wide range of providers that you can use to interact with different language models:
- [xAI Grok Provider](/providers/ai-sdk-providers/xai) (`@ai-sdk/xai`)
+- [Vercel Provider](/providers/ai-sdk-providers/vercel) (`@ai-sdk/vercel`)
- [OpenAI Provider](/providers/ai-sdk-providers/openai) (`@ai-sdk/openai`)
- [Azure OpenAI Provider](/providers/ai-sdk-providers/azure) (`@ai-sdk/azure`)
- [Anthropic Provider](/providers/ai-sdk-providers/anthropic) (`@ai-sdk/anthropic`)
@@ -40,6 +41,13 @@ The AI SDK comes with a wide range of providers that you can use to interact wit
- [Cerebras Provider](/providers/ai-sdk-providers/cerebras) (`@ai-sdk/cerebras`)
- [Groq Provider](/providers/ai-sdk-providers/groq) (`@ai-sdk/groq`)
- [Perplexity Provider](/providers/ai-sdk-providers/perplexity) (`@ai-sdk/perplexity`)
+- [ElevenLabs Provider](/providers/ai-sdk-providers/elevenlabs) (`@ai-sdk/elevenlabs`)
+- [LMNT Provider](/providers/ai-sdk-providers/lmnt) (`@ai-sdk/lmnt`)
+- [Hume Provider](/providers/ai-sdk-providers/hume) (`@ai-sdk/hume`)
+- [Rev.ai Provider](/providers/ai-sdk-providers/revai) (`@ai-sdk/revai`)
+- [Deepgram Provider](/providers/ai-sdk-providers/deepgram) (`@ai-sdk/deepgram`)
+- [Gladia Provider](/providers/ai-sdk-providers/gladia) (`@ai-sdk/gladia`)
+- [AssemblyAI Provider](/providers/ai-sdk-providers/assemblyai) (`@ai-sdk/assemblyai`)
You can also use the [OpenAI Compatible provider](/providers/openai-compatible-providers) with OpenAI-compatible APIs:
@@ -56,13 +64,17 @@ The open-source community has created the following providers:
- [Portkey Provider](/providers/community-providers/portkey) (`@portkey-ai/vercel-provider`)
- [Cloudflare Workers AI Provider](/providers/community-providers/cloudflare-workers-ai) (`workers-ai-provider`)
- [OpenRouter Provider](/providers/community-providers/openrouter) (`@openrouter/ai-sdk-provider`)
+- [Requesty Provider](/providers/community-providers/requesty) (`@requesty/ai-sdk`)
- [Crosshatch Provider](/providers/community-providers/crosshatch) (`@crosshatch/ai-provider`)
- [Mixedbread Provider](/providers/community-providers/mixedbread) (`mixedbread-ai-provider`)
- [Voyage AI Provider](/providers/community-providers/voyage-ai) (`voyage-ai-provider`)
- [Mem0 Provider](/providers/community-providers/mem0)(`@mem0/vercel-ai-provider`)
+- [Letta Provider](/providers/community-providers/letta)(`@letta-ai/vercel-ai-sdk-provider`)
- [Spark Provider](/providers/community-providers/spark) (`spark-ai-provider`)
- [AnthropicVertex Provider](/providers/community-providers/anthropic-vertex-ai) (`anthropic-vertex-ai`)
- [LangDB Provider](/providers/community-providers/langdb) (`@langdb/vercel-provider`)
+- [Dify Provider](/providers/community-providers/dify) (`dify-ai-provider`)
+- [Sarvam Provider](/providers/community-providers/sarvam) (`sarvam-ai-provider`)
## Self-Hosted Models
@@ -79,43 +91,56 @@ Additionally, any self-hosted provider that supports the OpenAI specification ca
The AI providers support different language models with various capabilities.
Here are the capabilities of popular models:
-| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming |
-| ------------------------------------------------------------------------ | ---------------------------- | ------------------- | ------------------- | ------------------- | ------------------- |
-| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | |
-| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | |
-| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | |
-| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | |
-| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | |
-| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | |
-| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | |
-| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | |
-| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | |
-| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | |
-| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | |
-| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | |
-| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | |
-| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | |
-| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | |
-| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | |
-| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | |
-| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | |
-| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | |
-| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | |
-| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | |
-| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-70b` | | | | |
-| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | |
-| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | |
-| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | |
-| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | |
-| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | |
+| Provider | Model | Image Input | Object Generation | Tool Usage | Tool Streaming |
+| ------------------------------------------------------------------------ | ------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3-fast` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3-mini` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-3-mini-fast` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-1212` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-2-vision-1212` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-beta` | | | | |
+| [xAI Grok](/providers/ai-sdk-providers/xai) | `grok-vision-beta` | | | | |
+| [Vercel](/providers/ai-sdk-providers/vercel) | `v0-1.0-md` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-mini` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-nano` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4-turbo` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `o3` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `o4-mini` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-mini` | | | | |
+| [OpenAI](/providers/ai-sdk-providers/openai) | `o1-preview` | | | | |
+| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-4-opus-20250514` | | | | |
+| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-4-sonnet-20250514` | | | | |
+| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-20250219` | | | | |
+| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20241022` | | | | |
+| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-sonnet-20240620` | | | | |
+| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-5-haiku-20241022` | | | | |
+| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-large-latest` | | | | |
+| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-large-latest` | | | | |
+| [Mistral](/providers/ai-sdk-providers/mistral) | `mistral-small-latest` | | | | |
+| [Mistral](/providers/ai-sdk-providers/mistral) | `pixtral-12b-2409` | | | | |
+| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-2.0-flash-exp` | | | | |
+| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-flash` | | | | |
+| [Google Generative AI](/providers/ai-sdk-providers/google-generative-ai) | `gemini-1.5-pro` | | | | |
+| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-2.0-flash-exp` | | | | |
+| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-flash` | | | | |
+| [Google Vertex](/providers/ai-sdk-providers/google-vertex) | `gemini-1.5-pro` | | | | |
+| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-chat` | | | | |
+| [DeepSeek](/providers/ai-sdk-providers/deepseek) | `deepseek-reasoner` | | | | |
+| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-8b` | | | | |
+| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.1-70b` | | | | |
+| [Cerebras](/providers/ai-sdk-providers/cerebras) | `llama3.3-70b` | | | | |
+| [Groq](/providers/ai-sdk-providers/groq) | `meta-llama/llama-4-scout-17b-16e-instruct` | | | | |
+| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.3-70b-versatile` | | | | |
+| [Groq](/providers/ai-sdk-providers/groq) | `llama-3.1-8b-instant` | | | | |
+| [Groq](/providers/ai-sdk-providers/groq) | `mixtral-8x7b-32768` | | | | |
+| [Groq](/providers/ai-sdk-providers/groq) | `gemma2-9b-it` | | | | |
This table is not exhaustive. Additional models can be found in the provider
diff --git a/content/docs/02-foundations/03-prompts.mdx b/content/docs/02-foundations/03-prompts.mdx
index a7ad75f94d8a..e1759e3976d2 100644
--- a/content/docs/02-foundations/03-prompts.mdx
+++ b/content/docs/02-foundations/03-prompts.mdx
@@ -92,6 +92,84 @@ Instead of sending a text in the `content` property, you can send an array of pa
models](./providers-and-models#model-capabilities).
+### Provider Options
+
+You can pass through additional provider-specific metadata to enable provider-specific functionality at three levels.
+
+#### Function Call Level
+
+Functions like [`streamText`](/docs/reference/ai-sdk-core/stream-text#provider-options) or [`generateText`](/docs/reference/ai-sdk-core/generate-text#provider-options) accept a `providerOptions` property.
+
+Adding provider options at the function call level should be used when you do not need granular control over where the provider options are applied.
+
+```ts
+const { text } = await generateText({
+ model: azure('your-deployment-name'),
+ providerOptions: {
+ openai: {
+ reasoningEffort: 'low',
+ },
+ },
+});
+```
+
+#### Message Level
+
+For granular control over applying provider options at the message level, you can pass `providerOptions` to the message object:
+
+```ts
+const messages = [
+ {
+ role: 'system',
+ content: 'Cached system message',
+ providerOptions: {
+ // Sets a cache control breakpoint on the system message
+ anthropic: { cacheControl: { type: 'ephemeral' } },
+ },
+ },
+];
+```
+
+#### Message Part Level
+
+Certain provider-specific options require configuration at the message part level:
+
+```ts
+const messages = [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'text',
+ text: 'Describe the image in detail.',
+ providerOptions: {
+ openai: { imageDetail: 'low' },
+ },
+ },
+ {
+ type: 'image',
+ image:
+ 'https://github.com/vercel/ai/blob/main/examples/ai-core/data/comic-cat.png?raw=true',
+ // Sets image detail configuration for image part:
+ providerOptions: {
+ openai: { imageDetail: 'low' },
+ },
+ },
+ ],
+ },
+];
+```
+
+
+ AI SDK UI hooks like [`useChat`](/docs/reference/ai-sdk-ui/use-chat) return
+ arrays of `UIMessage` objects, which do not support provider options. We
+ recommend using the
+ [`convertToCoreMessages`](/docs/reference/ai-sdk-ui/convert-to-core-messages)
+ function to convert `UIMessage` objects to
+ [`CoreMessage`](/docs/reference/ai-sdk-core/core-message) objects before
+  adding `providerOptions` to messages or message parts.
+
+
### User Messages
#### Text Parts
diff --git a/content/docs/02-foundations/04-tools.mdx b/content/docs/02-foundations/04-tools.mdx
index ac8a2acf3338..f59db24dcbdb 100644
--- a/content/docs/02-foundations/04-tools.mdx
+++ b/content/docs/02-foundations/04-tools.mdx
@@ -92,8 +92,11 @@ When you work with tools, you typically need a mix of application specific tools
There are several providers that offer pre-built tools as **toolkits** that you can use out of the box:
- **[agentic](https://github.com/transitive-bullshit/agentic)** - A collection of 20+ tools. Most tools connect to access external APIs such as [Exa](https://exa.ai/) or [E2B](https://e2b.dev/).
-- **[browserbase](https://docs.browserbase.com/integrations/vercel-ai/introduction)** - Browser tool that runs a headless browser
+- **[browserbase](https://docs.browserbase.com/integrations/vercel/introduction#vercel-ai-integration)** - Browser tool that runs a headless browser
+- **[browserless](https://docs.browserless.io/ai-integrations/vercel-ai-sdk)** - Browser automation service with AI integration - self-hosted or cloud-based
+- **[Smithery](https://smithery.ai/docs/use/connect)** - Smithery provides an open marketplace of 6K+ MCPs, including [Browserbase](https://browserbase.com/) and [Exa](https://exa.ai/).
- **[Stripe agent tools](https://docs.stripe.com/agents)** - Tools for interacting with Stripe.
+- **[StackOne ToolSet](https://docs.stackone.com/agents)** - Agentic integrations for hundreds of [enterprise SaaS](https://www.stackone.com/integrations)
- **[Toolhouse](https://docs.toolhouse.ai/toolhouse/using-vercel-ai)** - AI function-calling in 3 lines of code for over 25 different actions.
- **[Agent Tools](https://ai-sdk-agents.vercel.app/?item=introduction)** - A collection of tools for agents.
- **[AI Tool Maker](https://github.com/nihaocami/ai-tool-maker)** - A CLI utility to generate AI SDK tools from OpenAPI specs.
diff --git a/content/docs/02-getting-started/02-nextjs-app-router.mdx b/content/docs/02-getting-started/02-nextjs-app-router.mdx
index 8a8bc840e606..95451e468dc2 100644
--- a/content/docs/02-getting-started/02-nextjs-app-router.mdx
+++ b/content/docs/02-getting-started/02-nextjs-app-router.mdx
@@ -234,17 +234,17 @@ In this updated code:
- Defines parameters using a Zod schema, specifying that it requires a `location` string to execute this tool. The model will attempt to extract this parameter from the context of the conversation. If it can't, it will ask the user for the missing information.
- Defines an `execute` function that simulates getting weather data (in this case, it returns a random temperature). This is an asynchronous function running on the server so you can fetch real data from an external API.
- Now your chatbot can "fetch" weather information for any location the user asks about. When the model determines it needs to use the weather tool, it will generate a tool call with the necessary parameters. The `execute` function will then be automatically run, and you can access the results via `toolInvocations` that is available on the message object.
+Now your chatbot can "fetch" weather information for any location the user asks about. When the model determines it needs to use the weather tool, it will generate a tool call with the necessary parameters. The `execute` function will then be automatically run, and you can access the results via the `tool-invocation` part that is available on the `message.parts` array.
Try asking something like "What's the weather in New York?" and see how the model uses the new tool.
-Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result in the `toolInvocations` key of the message object.
+Notice the blank response in the UI? This is because instead of generating a text response, the model generated a tool call. You can access the tool call and subsequent tool result via the `tool-invocation` part of the `message.parts` array.
### Update the UI
To display the tool invocations in your UI, update your `app/page.tsx` file:
-```tsx filename="app/page.tsx" highlight="18-24"
+```tsx filename="app/page.tsx" highlight="16-21"
'use client';
import { useChat } from '@ai-sdk/react';
diff --git a/content/docs/02-getting-started/04-svelte.mdx b/content/docs/02-getting-started/04-svelte.mdx
index 36bd75871702..6de2e30f49c4 100644
--- a/content/docs/02-getting-started/04-svelte.mdx
+++ b/content/docs/02-getting-started/04-svelte.mdx
@@ -139,8 +139,17 @@ Update your root page (`src/routes/+page.svelte`) with the following code to sho
- {#each chat.messages as message}
- - {message.role}: {message.content}
+ {#each chat.messages as message, messageIndex (messageIndex)}
+ -
+
{message.role}
+
+ {#each message.parts as part, partIndex (partIndex)}
+ {#if part.type === 'text'}
+
{part.text}
+ {/if}
+ {/each}
+
+
{/each}