From 9e87162bf2164ab3b9301bcbd25ccd7e82c6e2f7 Mon Sep 17 00:00:00 2001 From: jdecroock Date: Tue, 23 Dec 2025 12:37:23 +0100 Subject: [PATCH 1/4] Add preact integration --- .changeset/tired-years-stick.md | 5 + packages/typescript/ai-preact/CHANGELOG.md | 1 + packages/typescript/ai-preact/README.md | 157 ++ packages/typescript/ai-preact/package.json | 57 + packages/typescript/ai-preact/src/index.ts | 17 + packages/typescript/ai-preact/src/types.ts | 98 ++ packages/typescript/ai-preact/src/use-chat.ts | 159 ++ .../typescript/ai-preact/tests/test-utils.ts | 29 + .../ai-preact/tests/use-chat.test.ts | 1323 +++++++++++++++++ packages/typescript/ai-preact/tsconfig.json | 11 + packages/typescript/ai-preact/vite.config.ts | 36 + .../typescript/ai-preact/vitest.config.ts | 35 + pnpm-lock.yaml | 444 ++++++ 13 files changed, 2372 insertions(+) create mode 100644 .changeset/tired-years-stick.md create mode 100644 packages/typescript/ai-preact/CHANGELOG.md create mode 100644 packages/typescript/ai-preact/README.md create mode 100644 packages/typescript/ai-preact/package.json create mode 100644 packages/typescript/ai-preact/src/index.ts create mode 100644 packages/typescript/ai-preact/src/types.ts create mode 100644 packages/typescript/ai-preact/src/use-chat.ts create mode 100644 packages/typescript/ai-preact/tests/test-utils.ts create mode 100644 packages/typescript/ai-preact/tests/use-chat.test.ts create mode 100644 packages/typescript/ai-preact/tsconfig.json create mode 100644 packages/typescript/ai-preact/vite.config.ts create mode 100644 packages/typescript/ai-preact/vitest.config.ts diff --git a/.changeset/tired-years-stick.md b/.changeset/tired-years-stick.md new file mode 100644 index 00000000..b3f57a62 --- /dev/null +++ b/.changeset/tired-years-stick.md @@ -0,0 +1,5 @@ +--- +'@tanstack/ai-preact': minor +--- + +Create initial release for preact diff --git a/packages/typescript/ai-preact/CHANGELOG.md b/packages/typescript/ai-preact/CHANGELOG.md new file mode 100644 index 00000000..fffbac3c --- /dev/null +++ b/packages/typescript/ai-preact/CHANGELOG.md @@ -0,0 +1 @@ +# @tanstack/ai-preact diff --git a/packages/typescript/ai-preact/README.md b/packages/typescript/ai-preact/README.md new file mode 100644 index 00000000..f0553d37 --- /dev/null +++ b/packages/typescript/ai-preact/README.md @@ -0,0 +1,157 @@ +
+ +### [Become a Sponsor!](https://github.com/sponsors/tannerlinsley/) +
+ +# TanStack AI + +A powerful, type-safe AI SDK for building AI-powered applications. + +- Provider-agnostic adapters (OpenAI, Anthropic, Gemini, Ollama, etc.) +- **Tree-shakeable adapters** - Import only what you need for smaller bundles +- **Multimodal content support** - Send images, audio, video, and documents +- **Image generation** - Generate images with OpenAI DALL-E/GPT-Image and Gemini Imagen +- Chat completion, streaming, and agent loop strategies +- Headless chat state management with adapters (SSE, HTTP stream, custom) +- Isomorphic type-safe tools with server/client execution +- **Enhanced integration with TanStack Start** - Share implementations between AI tools and server functions + +### Read the docs → + +## Tree-Shakeable Adapters + +Import only the functionality you need for smaller bundle sizes: + +```typescript +// Only chat functionality - no summarization code bundled +import { openaiText } from '@tanstack/ai-openai/adapters' +import { generate } from '@tanstack/ai' + +const textAdapter = openaiText() + +const result = generate({ + adapter: textAdapter, + model: 'gpt-4o', + messages: [{ role: 'user', content: [{ type: 'text', content: 'Hello!' }] }], +}) + +for await (const chunk of result) { + console.log(chunk) +} +``` + +Available adapters: `openaiText`, `openaiEmbed`, `openaiSummarize`, `anthropicText`, `geminiText`, `ollamaText`, and more. + +## Bonus: TanStack Start Integration + +TanStack AI works with **any** framework (Next.js, Express, Remix, etc.). + +**With TanStack Start**, you get a bonus: share implementations between AI tools and server functions with `createServerFnTool`: + +```typescript +import { createServerFnTool } from '@tanstack/ai-preact' + +// Define once, get AI tool AND server function (TanStack Start only) +const getProducts = createServerFnTool({ + name: 'getProducts', + inputSchema: z.object({ query: z.string() }), + execute: async ({ query }) => db.products.search(query), +}) + +// Use in AI chat +chat({ tools: [getProducts.server] }) + +// Call directly from components (no API endpoint needed!) +const products = await getProducts.serverFn({ query: 'laptop' }) +``` + +No duplicate logic, full type safety, automatic validation. The `serverFn` feature requires TanStack Start. See [docs](https://tanstack.com/ai) for details. + +## Get Involved + +- We welcome issues and pull requests! +- Participate in [GitHub discussions](https://github.com/TanStack/ai/discussions) +- Chat with the community on [Discord](https://discord.com/invite/WrRKjPJ) +- See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions + +## Partners + + + + + + +
- CodeRabbit
- Cloudflare
+AI & you? +

+We're looking for TanStack AI Partners to join our mission! Partner with us to push the boundaries of TanStack AI and build amazing things together. +

+LET'S CHAT +
+ +## Explore the TanStack Ecosystem + +- TanStack Config – Tooling for JS/TS packages +- TanStack DB – Reactive sync client store +- TanStack Devtools – Unified devtools panel +- TanStack Form – Type‑safe form state +- TanStack Pacer – Debouncing, throttling, batching +- TanStack Query – Async state & caching +- TanStack Ranger – Range & slider primitives +- TanStack Router – Type‑safe routing, caching & URL state +- TanStack Start – Full‑stack SSR & streaming +- TanStack Store – Reactive data store +- TanStack Table – Headless datagrids +- TanStack Virtual – Virtualized rendering + +… and more at TanStack.com » + + diff --git a/packages/typescript/ai-preact/package.json b/packages/typescript/ai-preact/package.json new file mode 100644 index 00000000..ffcb66d8 --- /dev/null +++ b/packages/typescript/ai-preact/package.json @@ -0,0 +1,57 @@ +{ + "name": "@tanstack/ai-preact", + "version": "0.0.0", + "description": "Preact hooks for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-preact" + }, + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc", + "test:build": "publint --strict", + "build": "vite build" + }, + "keywords": [ + "ai", + "preact", + "hooks", + "tanstack", + "chat", + "streaming" + ], + "dependencies": { + "@tanstack/ai-client": "workspace:*" + }, + "devDependencies": { + "@testing-library/preact": "^3.2.4", + "@vitest/coverage-v8": "4.0.14", + "jsdom": "^27.2.0", + "preact": "^10.26.9", + "vite": "^7.2.7" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "preact": ">=10.0.0" + } +} diff --git a/packages/typescript/ai-preact/src/index.ts b/packages/typescript/ai-preact/src/index.ts new file mode 100644 index 00000000..763df52e --- /dev/null +++ b/packages/typescript/ai-preact/src/index.ts @@ -0,0 +1,17 @@ +export { useChat } from './use-chat' +export type { + UseChatOptions, + UseChatReturn, + UIMessage, + ChatRequestBody, +} from './types' + +export { + fetchServerSentEvents, + fetchHttpStream, + stream, + createChatClientOptions, + type ConnectionAdapter, + type FetchConnectionOptions, + type InferChatMessages, +} from '@tanstack/ai-client' diff --git a/packages/typescript/ai-preact/src/types.ts b/packages/typescript/ai-preact/src/types.ts new file mode 100644 index 00000000..21333907 --- /dev/null +++ b/packages/typescript/ai-preact/src/types.ts @@ -0,0 +1,98 @@ +import type { AnyClientTool, ModelMessage } from '@tanstack/ai' +import type { + ChatClientOptions, + ChatRequestBody, + UIMessage, +} from '@tanstack/ai-client' + +// Re-export types from ai-client +export type { UIMessage, ChatRequestBody } + +/** + * Options for the useChat hook. 
+ * + * This extends ChatClientOptions but omits the state change callbacks that are + * managed internally by Preact state: + * - `onMessagesChange` - Managed by Preact state (exposed as `messages`) + * - `onLoadingChange` - Managed by Preact state (exposed as `isLoading`) + * - `onErrorChange` - Managed by Preact state (exposed as `error`) + * + * All other callbacks (onResponse, onChunk, onFinish, onError) are + * passed through to the underlying ChatClient and can be used for side effects. + * + * Note: Connection and body changes will recreate the ChatClient instance. + * To update these options, remount the component or use a key prop. + */ +export type UseChatOptions = any> = + Omit< + ChatClientOptions, + 'onMessagesChange' | 'onLoadingChange' | 'onErrorChange' + > + +export interface UseChatReturn< + TTools extends ReadonlyArray = any, +> { + /** + * Current messages in the conversation + */ + messages: Array> + + /** + * Send a message and get a response + */ + sendMessage: (content: string) => Promise + + /** + * Append a message to the conversation + */ + append: (message: ModelMessage | UIMessage) => Promise + + /** + * Add the result of a client-side tool execution + */ + addToolResult: (result: { + toolCallId: string + tool: string + output: any + state?: 'output-available' | 'output-error' + errorText?: string + }) => Promise + + /** + * Respond to a tool approval request + */ + addToolApprovalResponse: (response: { + id: string // approval.id, not toolCallId + approved: boolean + }) => Promise + + /** + * Reload the last assistant message + */ + reload: () => Promise + + /** + * Stop the current response generation + */ + stop: () => void + + /** + * Whether a response is currently being generated + */ + isLoading: boolean + + /** + * Current error, if any + */ + error: Error | undefined + + /** + * Set messages manually + */ + setMessages: (messages: Array>) => void + + /** + * Clear all messages + */ + clear: () => void +} diff --git a/packages/typescript/ai-preact/src/use-chat.ts b/packages/typescript/ai-preact/src/use-chat.ts new file mode 100644 index 00000000..9dc9a066 --- /dev/null +++ b/packages/typescript/ai-preact/src/use-chat.ts @@ -0,0 +1,159 @@ +import { + useCallback, + useEffect, + useId, + useMemo, + useRef, + useState, +} from 'preact/hooks' +import { ChatClient } from '@tanstack/ai-client' +import type { AnyClientTool, ModelMessage } from '@tanstack/ai' + +import type { UIMessage, UseChatOptions, UseChatReturn } from './types' + +export function useChat = any>( + options: UseChatOptions, +): UseChatReturn { + const hookId = useId() + const clientId = options.id || hookId + + const [messages, setMessages] = useState>>( + options.initialMessages || [], + ) + const [isLoading, setIsLoading] = useState(false) + const [error, setError] = useState(undefined) + + // Track current messages in a ref to preserve them when client is recreated + const messagesRef = useRef>>( + options.initialMessages || [], + ) + const isFirstMountRef = useRef(true) + const optionsRef = useRef>(options) + + optionsRef.current = options + + useEffect(() => { + messagesRef.current = messages + }, [messages]) + + + const client = useMemo(() => { + // On first mount, use initialMessages. On subsequent recreations, preserve existing messages. + const messagesToUse = isFirstMountRef.current + ? 
options.initialMessages || [] + : messagesRef.current + + isFirstMountRef.current = false + + return new ChatClient({ + connection: optionsRef.current.connection, + id: clientId, + initialMessages: messagesToUse, + body: optionsRef.current.body, + onResponse: optionsRef.current.onResponse, + onChunk: optionsRef.current.onChunk, + onFinish: optionsRef.current.onFinish, + onError: optionsRef.current.onError, + tools: optionsRef.current.tools, + streamProcessor: options.streamProcessor, + onMessagesChange: (newMessages: Array>) => { + setMessages(newMessages) + }, + onLoadingChange: (newIsLoading: boolean) => { + setIsLoading(newIsLoading) + }, + onErrorChange: (newError: Error | undefined) => { + setError(newError) + }, + }) + }, [clientId]) + + // Sync initial messages on mount only + // Note: initialMessages are passed to ChatClient constructor, but we also + // set them here to ensure Preact state is in sync + useEffect(() => { + if (options.initialMessages && options.initialMessages.length && !messages.length) { + client.setMessagesManually(options.initialMessages) + } + }, []) + + // Cleanup on unmount: stop any in-flight requests + // Note: We only cleanup when client changes or component unmounts. + // DO NOT include isLoading in dependencies - that would cause the cleanup + // to run when isLoading changes, aborting continuation requests. + useEffect(() => { + return () => { + client.stop() + } + }, [client]) + + // Note: Callback options (onResponse, onChunk, onFinish, onError, onToolCall) + // are captured at client creation time. Changes to these callbacks require + // remounting the component or changing the connection to recreate the client. + const sendMessage = useCallback( + async (content: string) => { + await client.sendMessage(content) + }, + [client], + ) + + const append = useCallback( + async (message: ModelMessage | UIMessage) => { + await client.append(message) + }, + [client], + ) + + const reload = useCallback(async () => { + await client.reload() + }, [client]) + + const stop = useCallback(() => { + client.stop() + }, [client]) + + const clear = useCallback(() => { + client.clear() + }, [client]) + + const setMessagesManually = useCallback( + (newMessages: Array>) => { + client.setMessagesManually(newMessages) + }, + [client], + ) + + const addToolResult = useCallback( + async (result: { + toolCallId: string + tool: string + output: any + state?: 'output-available' | 'output-error' + errorText?: string + }) => { + await client.addToolResult(result) + }, + [client], + ) + + const addToolApprovalResponse = useCallback( + async (response: { id: string; approved: boolean }) => { + await client.addToolApprovalResponse(response) + }, + [client], + ) + + return { + messages, + sendMessage, + append, + reload, + stop, + isLoading, + error, + setMessages: setMessagesManually, + clear, + addToolResult, + addToolApprovalResponse, + } +} diff --git a/packages/typescript/ai-preact/tests/test-utils.ts b/packages/typescript/ai-preact/tests/test-utils.ts new file mode 100644 index 00000000..5315f07a --- /dev/null +++ b/packages/typescript/ai-preact/tests/test-utils.ts @@ -0,0 +1,29 @@ +// Re-export test utilities from ai-client +import { renderHook } from '@testing-library/preact' +import { useChat } from '../src/use-chat' +import type {RenderHookResult} from '@testing-library/preact'; +import type { UseChatOptions, UseChatReturn } from '../src/types' + +export { + createMockConnectionAdapter, + createTextChunks, + createToolCallChunks, +} from '../../ai-client/tests/test-utils' + 
+/** + * Render the useChat hook with testing utilities + * + * @example + * ```typescript + * const { result } = renderUseChat({ + * connection: createMockConnectionAdapter({ chunks: [...] }) + * }); + * + * await result.current.sendMessage("Hello"); + * ``` + */ +export function renderUseChat( + options: UseChatOptions = {} as UseChatOptions, +): RenderHookResult { + return renderHook(() => useChat(options)) +} diff --git a/packages/typescript/ai-preact/tests/use-chat.test.ts b/packages/typescript/ai-preact/tests/use-chat.test.ts new file mode 100644 index 00000000..b7bbedb6 --- /dev/null +++ b/packages/typescript/ai-preact/tests/use-chat.test.ts @@ -0,0 +1,1323 @@ +import { describe, expect, it, vi } from 'vitest' +import { act, waitFor } from '@testing-library/preact' +import { + createMockConnectionAdapter, + createTextChunks, + createToolCallChunks, + renderUseChat, +} from './test-utils' +import type { UIMessage } from '../src/types' +import type { ModelMessage } from '@tanstack/ai' + +describe('useChat', () => { + describe('initialization', () => { + it('should initialize with default state', () => { + const adapter = createMockConnectionAdapter() + const { result } = renderUseChat({ connection: adapter }) + + expect(result.current.messages).toEqual([]) + expect(result.current.isLoading).toBe(false) + expect(result.current.error).toBeUndefined() + }) + + it('should initialize with provided messages', () => { + const adapter = createMockConnectionAdapter() + const initialMessages: Array = [ + { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + createdAt: new Date(), + }, + ] + + const { result } = renderUseChat({ + connection: adapter, + initialMessages, + }) + + expect(result.current.messages).toEqual(initialMessages) + }) + + it('should use provided id', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + + const { result } = renderUseChat({ + connection: adapter, + id: 'custom-id', + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + // Message IDs are generated independently, not based on client ID + // Just verify messages exist and have IDs + const messageId = result.current.messages[0]?.id + expect(messageId).toBeDefined() + expect(typeof messageId).toBe('string') + }) + + it('should generate id if not provided', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + // Message IDs should have a generated prefix (not "custom-id-") + const messageId = result.current.messages[0]?.id + expect(messageId).toBeTruthy() + expect(messageId).not.toMatch(/^custom-id-/) + }) + + it('should maintain client instance across re-renders', () => { + const adapter = createMockConnectionAdapter() + const { result, rerender } = renderUseChat({ connection: adapter }) + + const initialMessages = result.current.messages + + rerender() + + // Client should be the same instance, state should persist + expect(result.current.messages).toBe(initialMessages) + }) + }) + + describe('state synchronization', () => { + it('should update messages via onMessagesChange callback', async () => { + const 
chunks = createTextChunks('Hello, world!') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThanOrEqual(2) + }) + + const userMessage = result.current.messages.find((m) => m.role === 'user') + expect(userMessage).toBeDefined() + if (userMessage) { + expect(userMessage.parts[0]).toEqual({ + type: 'text', + content: 'Hello', + }) + } + }) + + it('should update loading state via onLoadingChange callback', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 50, + }) + const { result } = renderUseChat({ connection: adapter }) + + expect(result.current.isLoading).toBe(false) + + let sendPromise: Promise + act(() => { + sendPromise = result.current.sendMessage('Test') + }) + + // Should be loading during send + await waitFor(() => { + expect(result.current.isLoading).toBe(true) + }) + + await act(async () => { + await sendPromise! + }) + + // Should not be loading after completion + await waitFor(() => { + expect(result.current.isLoading).toBe(false) + }) + }) + + it('should update error state via onErrorChange callback', async () => { + const error = new Error('Connection failed') + const adapter = createMockConnectionAdapter({ + shouldError: true, + error, + }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.error).toBeDefined() + }) + + expect(result.current.error?.message).toBe('Connection failed') + }) + + it('should persist state across re-renders', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result, rerender } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + const messageCount = result.current.messages.length + + rerender() + + // State should persist after re-render + expect(result.current.messages.length).toBe(messageCount) + }) + }) + + describe('sendMessage', () => { + it('should send a message and append it', async () => { + const chunks = createTextChunks('Hello, world!') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + const userMessage = result.current.messages.find((m) => m.role === 'user') + expect(userMessage).toBeDefined() + if (userMessage) { + expect(userMessage.parts[0]).toEqual({ + type: 'text', + content: 'Hello', + }) + } + }) + + it('should create assistant message from stream chunks', async () => { + const chunks = createTextChunks('Hello, world!') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + }) + + const assistantMessage = 
result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + if (assistantMessage) { + const textPart = assistantMessage.parts.find((p) => p.type === 'text') + expect(textPart).toBeDefined() + if (textPart) { + expect(textPart.content).toBe('Hello, world!') + } + } + }) + + it('should not send empty messages', async () => { + const adapter = createMockConnectionAdapter() + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('') + await result.current.sendMessage(' ') + }) + + expect(result.current.messages.length).toBe(0) + }) + + it('should not send message while loading', async () => { + const adapter = createMockConnectionAdapter({ + chunks: createTextChunks('Response'), + chunkDelay: 100, + }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + const promise1 = result.current.sendMessage('First') + const promise2 = result.current.sendMessage('Second') + await Promise.all([promise1, promise2]) + }) + + // Should only have one user message since second was blocked + const userMessages = result.current.messages.filter( + (m) => m.role === 'user', + ) + expect(userMessages.length).toBe(1) + }) + + it('should handle errors during sendMessage', async () => { + const error = new Error('Network error') + const adapter = createMockConnectionAdapter({ + shouldError: true, + error, + }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.error).toBeDefined() + }) + + expect(result.current.error?.message).toBe('Network error') + expect(result.current.isLoading).toBe(false) + }) + }) + + describe('append', () => { + it('should append a UIMessage', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + const message: UIMessage = { + id: 'user-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + createdAt: new Date(), + } + + await act(async () => { + await result.current.append(message) + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + expect(result.current.messages[0]?.id).toBe('user-1') + }) + + it('should convert and append a ModelMessage', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + const modelMessage: ModelMessage = { + role: 'user', + content: 'Hello from model', + } + + await act(async () => { + await result.current.append(modelMessage) + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + expect(result.current.messages[0]?.role).toBe('user') + expect(result.current.messages[0]?.parts[0]).toEqual({ + type: 'text', + content: 'Hello from model', + }) + }) + + it('should handle errors during append', async () => { + const error = new Error('Append failed') + const adapter = createMockConnectionAdapter({ + shouldError: true, + error, + }) + const { result } = renderUseChat({ connection: adapter }) + + const message: UIMessage = { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Hello' }], + createdAt: new Date(), + } + + await act(async () => { + await result.current.append(message) + }) + + await waitFor(() => { + 
expect(result.current.error).toBeDefined() + }) + + expect(result.current.error?.message).toBe('Append failed') + }) + }) + + describe('reload', () => { + it('should reload the last assistant message', async () => { + const chunks1 = createTextChunks('First response') + const chunks2 = createTextChunks('Second response') + let callCount = 0 + + const adapter = createMockConnectionAdapter({ + chunks: chunks1, + onConnect: () => { + callCount++ + // Return different chunks on second call + if (callCount === 2) { + return chunks2 + } + return undefined + }, + }) + + // Create a new adapter for the second call + const adapter2 = createMockConnectionAdapter({ chunks: chunks2 }) + const { result, rerender } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + }) + + // Reload with new adapter + rerender({ connection: adapter2 }) + await act(async () => { + await result.current.reload() + }) + + await waitFor(() => { + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + }) + + // Should have reloaded (though content might be same if adapter doesn't change) + const messagesAfterReload = result.current.messages + expect(messagesAfterReload.length).toBeGreaterThan(0) + }) + + it('should maintain conversation history after reload', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('First') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThanOrEqual(2) + }) + + const messageCountBeforeReload = result.current.messages.length + + await act(async () => { + await result.current.reload() + }) + + await waitFor(() => { + // Should still have the same number of messages (user + assistant) + expect(result.current.messages.length).toBeGreaterThanOrEqual(2) + }) + + // History should be maintained + expect(result.current.messages.length).toBeGreaterThanOrEqual( + messageCountBeforeReload, + ) + }) + + it('should handle errors during reload', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThanOrEqual(2) + }) + + // Note: We can't easily change the adapter after creation, + // so this test verifies error handling in general + // The actual error would come from the connection adapter + expect(result.current.reload).toBeDefined() + }) + }) + + describe('stop', () => { + it('should stop current generation', async () => { + const chunks = createTextChunks('Long response that will be stopped') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 50, + }) + const { result } = renderUseChat({ connection: adapter }) + + let sendPromise: Promise + act(() => { + sendPromise = result.current.sendMessage('Test') + }) + + // Wait for loading to start + await waitFor(() => { + expect(result.current.isLoading).toBe(true) + }) + + // Stop the generation + act(() => { + result.current.stop() + }) + + 
await act(async () => { + await sendPromise! + }) + + // Should eventually stop loading + await waitFor( + () => { + expect(result.current.isLoading).toBe(false) + }, + { timeout: 1000 }, + ) + }) + + it('should be safe to call multiple times', () => { + const adapter = createMockConnectionAdapter() + const { result } = renderUseChat({ connection: adapter }) + + // Should not throw + result.current.stop() + result.current.stop() + result.current.stop() + + expect(result.current.isLoading).toBe(false) + }) + + it('should clear loading state when stopped', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 50, + }) + const { result } = renderUseChat({ connection: adapter }) + + let sendPromise: Promise + act(() => { + sendPromise = result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.isLoading).toBe(true) + }) + + act(() => { + result.current.stop() + }) + + await waitFor( + () => { + expect(result.current.isLoading).toBe(false) + }, + { timeout: 1000 }, + ) + + await act(async () => { + await sendPromise!.catch(() => { + // Ignore errors from stopped request + }) + }) + }) + }) + + describe('clear', () => { + it('should clear all messages', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + act(() => { + result.current.clear() + }) + + await waitFor(() => { + expect(result.current.messages).toEqual([]) + }) + }) + + it('should reset to initial state', async () => { + const initialMessages: Array = [ + { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Initial' }], + createdAt: new Date(), + }, + ] + + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ + connection: adapter, + initialMessages, + }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan( + initialMessages.length, + ) + }) + + act(() => { + result.current.clear() + }) + + // Should clear all messages, not reset to initial + await waitFor(() => { + expect(result.current.messages).toEqual([]) + }) + }) + + it('should maintain client instance after clear', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + act(() => { + result.current.clear() + }) + + // Should still be able to send messages + await act(async () => { + await result.current.sendMessage('New message') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + }) + }) + + describe('setMessages', () => { + it('should manually set messages', async () => { + const adapter = createMockConnectionAdapter() + const { result } = renderUseChat({ connection: adapter }) + + const newMessages: Array = [ + { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Manual' }], + createdAt: new Date(), + }, + ] + + act(() 
=> { + result.current.setMessages(newMessages) + }) + + await waitFor(() => { + expect(result.current.messages).toEqual(newMessages) + }) + }) + + it('should update state immediately', async () => { + const adapter = createMockConnectionAdapter() + const { result } = renderUseChat({ connection: adapter }) + + expect(result.current.messages).toEqual([]) + + const newMessages: Array = [ + { + id: 'msg-1', + role: 'user', + parts: [{ type: 'text', content: 'Immediate' }], + createdAt: new Date(), + }, + ] + + act(() => { + result.current.setMessages(newMessages) + }) + + // Wait for state to update + await waitFor(() => { + expect(result.current.messages).toEqual(newMessages) + }) + }) + + it('should replace all existing messages', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Hello') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + const originalCount = result.current.messages.length + + const newMessages: Array = [ + { + id: 'msg-new', + role: 'user', + parts: [{ type: 'text', content: 'Replaced' }], + createdAt: new Date(), + }, + ] + + act(() => { + result.current.setMessages(newMessages) + }) + + await waitFor(() => { + expect(result.current.messages).toEqual(newMessages) + expect(result.current.messages.length).toBe(1) + expect(result.current.messages.length).not.toBe(originalCount) + }) + }) + }) + + describe('callbacks', () => { + it('should call onChunk callback when chunks are received', async () => { + const chunks = createTextChunks('Hello') + const adapter = createMockConnectionAdapter({ chunks }) + const onChunk = vi.fn() + + const { result } = renderUseChat({ + connection: adapter, + onChunk, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(onChunk).toHaveBeenCalled() + }) + + // Should have been called for each chunk + expect(onChunk.mock.calls.length).toBeGreaterThan(0) + }) + + it('should call onFinish callback when response finishes', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const onFinish = vi.fn() + + const { result } = renderUseChat({ + connection: adapter, + onFinish, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(onFinish).toHaveBeenCalled() + }) + + const finishedMessage = onFinish.mock.calls[0]?.[0] + expect(finishedMessage).toBeDefined() + expect(finishedMessage.role).toBe('assistant') + }) + + it('should call onError callback when error occurs', async () => { + const error = new Error('Test error') + const adapter = createMockConnectionAdapter({ + shouldError: true, + error, + }) + const onError = vi.fn() + + const { result } = renderUseChat({ + connection: adapter, + onError, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(onError).toHaveBeenCalled() + }) + + expect(onError.mock.calls[0]?.[0].message).toBe('Test error') + }) + + it('should call onResponse callback when response is received', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ chunks }) + const onResponse = vi.fn() + + const { result } = renderUseChat({ + connection: adapter, + onResponse, + }) + + await act(async () => { + 
await result.current.sendMessage('Test') + }) + + // onResponse may or may not be called depending on adapter implementation + // This test verifies the callback is passed through + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + }) + }) + + describe('edge cases and error handling', () => { + describe('options changes', () => { + it('should maintain client instance when options change', () => { + const adapter1 = createMockConnectionAdapter() + const { result, rerender } = renderUseChat({ connection: adapter1 }) + + const initialMessages = result.current.messages + + const adapter2 = createMockConnectionAdapter() + rerender({ connection: adapter2 }) + + // Client instance should persist (current implementation doesn't update) + // This documents current behavior - options changes don't update client + expect(result.current.messages).toBe(initialMessages) + }) + + it('should handle body changes', () => { + const adapter = createMockConnectionAdapter() + const { result, rerender } = renderUseChat({ + connection: adapter, + body: { userId: '123' }, + }) + + rerender({ + connection: adapter, + body: { userId: '456' }, + }) + + // Should not throw + expect(result.current).toBeDefined() + }) + + it('should handle callback changes', () => { + const adapter = createMockConnectionAdapter() + const onChunk1 = vi.fn() + const { result, rerender } = renderUseChat({ + connection: adapter, + onChunk: onChunk1, + }) + + const onChunk2 = vi.fn() + rerender({ + connection: adapter, + onChunk: onChunk2, + }) + + // Should not throw + expect(result.current).toBeDefined() + }) + }) + + describe('unmount behavior', () => { + it('should not update state after unmount', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 100, + }) + const { result, unmount } = renderUseChat({ connection: adapter }) + + let sendPromise: Promise + act(() => { + sendPromise = result.current.sendMessage('Test') + }) + + // Unmount before completion + unmount() + + await sendPromise!.catch(() => { + // Ignore errors + }) + + // State updates after unmount should be ignored (Preact handles this) + // This test documents the expected behavior + expect(result.current).toBeDefined() + }) + + it('should stop loading on unmount if active', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 100, + }) + const { result, unmount } = renderUseChat({ connection: adapter }) + + act(() => { + result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.isLoading).toBe(true) + }) + + unmount() + + // After unmount, Preact will clean up + // The actual cleanup is handled by Preact's lifecycle + expect(result.current.isLoading).toBe(true) // Still true in test, but component is unmounted + }) + }) + + describe('concurrent operations', () => { + it('should handle multiple sendMessage calls', async () => { + const adapter = createMockConnectionAdapter({ + chunks: createTextChunks('Response'), + chunkDelay: 50, + }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + const promise1 = result.current.sendMessage('First') + const promise2 = result.current.sendMessage('Second') + await Promise.all([promise1, promise2]) + }) + + // Should only have one user message (second should be blocked) + const userMessages = result.current.messages.filter( + (m) => m.role === 'user', + ) + 
expect(userMessages.length).toBe(1) + }) + + it('should handle stop during sendMessage', async () => { + const chunks = createTextChunks('Long response') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 50, + }) + const { result } = renderUseChat({ connection: adapter }) + + let sendPromise: Promise + act(() => { + sendPromise = result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.isLoading).toBe(true) + }) + + act(() => { + result.current.stop() + }) + + await waitFor( + () => { + expect(result.current.isLoading).toBe(false) + }, + { timeout: 1000 }, + ) + + await act(async () => { + await sendPromise!.catch(() => { + // Ignore errors from stopped request + }) + }) + }) + + it('should handle reload during active stream', async () => { + const chunks = createTextChunks('Response') + const adapter = createMockConnectionAdapter({ + chunks, + chunkDelay: 50, + }) + const { result } = renderUseChat({ connection: adapter }) + + let sendPromise: Promise + act(() => { + sendPromise = result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.isLoading).toBe(true) + }) + + // Try to reload while sending + await act(async () => { + const reloadPromise = result.current.reload() + await Promise.allSettled([sendPromise!, reloadPromise]) + }) + + // Should eventually complete + await waitFor(() => { + expect(result.current.isLoading).toBe(false) + }) + }) + }) + + describe('error scenarios', () => { + it('should handle network errors', async () => { + const error = new Error('Network request failed') + const adapter = createMockConnectionAdapter({ + shouldError: true, + error, + }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.error).toBeDefined() + }) + + expect(result.current.error?.message).toBe('Network request failed') + expect(result.current.isLoading).toBe(false) + }) + + it('should handle stream errors', async () => { + const error = new Error('Stream error') + const adapter = createMockConnectionAdapter({ + shouldError: true, + error, + }) + const { result } = renderUseChat({ connection: adapter }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.error).toBeDefined() + }) + + expect(result.current.error?.message).toBe('Stream error') + }) + + it('should clear error on successful operation', async () => { + const errorAdapter = createMockConnectionAdapter({ + shouldError: true, + error: new Error('Initial error'), + }) + const { result, rerender } = renderUseChat({ + connection: errorAdapter, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.error).toBeDefined() + }) + + // Switch to working adapter + const workingAdapter = createMockConnectionAdapter({ + chunks: createTextChunks('Success'), + }) + rerender({ connection: workingAdapter }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + // Error should be cleared on success + expect(result.current.messages.length).toBeGreaterThan(0) + }) + }) + + it('should handle tool execution errors', async () => { + const toolCalls = createToolCallChunks([ + { id: 'tool-1', name: 'testTool', arguments: '{"param": "value"}' }, + ]) + const adapter = createMockConnectionAdapter({ chunks: toolCalls }) + const { result } = 
renderUseChat({ + connection: adapter, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + expect(result.current.messages.length).toBeGreaterThan(0) + }) + + // Tool errors are handled by adding error output to the tool call part + // The error state is not set for tool execution failures + // Check that the message contains a tool call with error output + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + + if (assistantMessage) { + const toolCallPart = assistantMessage.parts.find( + (p) => p.type === 'tool-call', + ) + expect(toolCallPart).toBeDefined() + } + }) + }) + + describe('multiple hook instances', () => { + it('should maintain independent state per instance', async () => { + const adapter1 = createMockConnectionAdapter({ + chunks: createTextChunks('Response 1'), + }) + const adapter2 = createMockConnectionAdapter({ + chunks: createTextChunks('Response 2'), + }) + + const { result: result1 } = renderUseChat({ + connection: adapter1, + id: 'chat-1', + }) + const { result: result2 } = renderUseChat({ + connection: adapter2, + id: 'chat-2', + }) + + await act(async () => { + await result1.current.sendMessage('Hello 1') + await result2.current.sendMessage('Hello 2') + }) + + await waitFor(() => { + expect(result1.current.messages.length).toBeGreaterThan(0) + expect(result2.current.messages.length).toBeGreaterThan(0) + }) + + // Each instance should have its own messages + expect(result1.current.messages.length).toBe( + result2.current.messages.length, + ) + expect(result1.current.messages[0]?.parts[0]).not.toEqual( + result2.current.messages[0]?.parts[0], + ) + }) + + it('should handle different IDs correctly', () => { + const adapter = createMockConnectionAdapter() + const { result: result1 } = renderUseChat({ + connection: adapter, + id: 'chat-1', + }) + const { result: result2 } = renderUseChat({ + connection: adapter, + id: 'chat-2', + }) + + // Should not interfere with each other + expect(result1.current.messages).toEqual([]) + expect(result2.current.messages).toEqual([]) + }) + + it('should not have cross-contamination', async () => { + const adapter1 = createMockConnectionAdapter({ + chunks: createTextChunks('One'), + }) + const adapter2 = createMockConnectionAdapter({ + chunks: createTextChunks('Two'), + }) + + const { result: result1 } = renderUseChat({ + connection: adapter1, + }) + const { result: result2 } = renderUseChat({ + connection: adapter2, + }) + + await act(async () => { + await result1.current.sendMessage('Message 1') + }) + + await waitFor(() => { + expect(result1.current.messages.length).toBeGreaterThan(0) + }) + + // Second instance should still be empty + expect(result2.current.messages.length).toBe(0) + + await act(async () => { + await result2.current.sendMessage('Message 2') + }) + + await waitFor(() => { + expect(result2.current.messages.length).toBeGreaterThan(0) + }) + + // Both should have messages, but different ones + expect(result1.current.messages.length).toBeGreaterThan(0) + expect(result2.current.messages.length).toBeGreaterThan(0) + expect(result1.current.messages[0]?.parts[0]).not.toEqual( + result2.current.messages[0]?.parts[0], + ) + }) + }) + + describe('tool operations', () => { + it('should handle addToolResult', async () => { + const toolCalls = createToolCallChunks([ + { id: 'tool-1', name: 'testTool', arguments: '{"param": "value"}' }, + ]) + const adapter = createMockConnectionAdapter({ chunks: 
toolCalls }) + const { result } = renderUseChat({ + connection: adapter, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + }) + + // Find tool call + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + const toolCallPart = assistantMessage?.parts.find( + (p) => p.type === 'tool-call', + ) + + if (toolCallPart) { + await act(async () => { + await result.current.addToolResult({ + toolCallId: toolCallPart.id, + tool: toolCallPart.name, + output: { result: 'manual' }, + }) + }) + + // Should update the tool call + await waitFor(() => { + const updatedMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + const updatedToolCall = updatedMessage?.parts.find( + (p) => p.type === 'tool-call' && p.id === toolCallPart.id, + ) + expect(updatedToolCall).toBeDefined() + }) + } + }) + + it('should handle addToolApprovalResponse', async () => { + const toolCalls = createToolCallChunks([ + { id: 'tool-1', name: 'testTool', arguments: '{"param": "value"}' }, + ]) + const adapter = createMockConnectionAdapter({ chunks: toolCalls }) + const { result } = renderUseChat({ + connection: adapter, + }) + + await act(async () => { + await result.current.sendMessage('Test') + }) + + await waitFor(() => { + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + expect(assistantMessage).toBeDefined() + }) + + // Find tool call with approval + const assistantMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + const toolCallPart = assistantMessage?.parts.find( + (p) => p.type === 'tool-call' && p.approval, + ) + + if ( + toolCallPart && + toolCallPart.type === 'tool-call' && + toolCallPart.approval + ) { + await act(async () => { + await result.current.addToolApprovalResponse({ + id: toolCallPart.approval!.id, + approved: true, + }) + }) + + // Should update approval state + await waitFor(() => { + const updatedMessage = result.current.messages.find( + (m) => m.role === 'assistant', + ) + const updatedToolCall = updatedMessage?.parts.find( + (p) => p.type === 'tool-call' && p.id === toolCallPart.id, + ) + if (updatedToolCall && updatedToolCall.type === 'tool-call') { + expect(updatedToolCall.approval?.approved).toBe(true) + } + }) + } + }) + }) + }) +}) diff --git a/packages/typescript/ai-preact/tsconfig.json b/packages/typescript/ai-preact/tsconfig.json new file mode 100644 index 00000000..2fcecae9 --- /dev/null +++ b/packages/typescript/ai-preact/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "jsx": "react-jsx", + "jsxImportSource": "preact", + "lib": ["ES2022", "DOM"] + }, + "include": ["src/**/*.ts", "src/**/*.tsx", "tests/**/*.ts", "tests/**/*.tsx"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/ai-preact/vite.config.ts b/packages/typescript/ai-preact/vite.config.ts new file mode 100644 index 00000000..77bcc2e6 --- /dev/null +++ b/packages/typescript/ai-preact/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 
'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/packages/typescript/ai-preact/vitest.config.ts b/packages/typescript/ai-preact/vitest.config.ts new file mode 100644 index 00000000..6733c18a --- /dev/null +++ b/packages/typescript/ai-preact/vitest.config.ts @@ -0,0 +1,35 @@ +import { defineConfig } from 'vitest/config' +import { resolve } from 'path' +import { fileURLToPath } from 'url' + +const __dirname = fileURLToPath(new URL('.', import.meta.url)) + +export default defineConfig({ + test: { + globals: true, + environment: 'jsdom', + include: ['tests/**/*.test.ts', 'tests/**/*.test.tsx'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.test.tsx', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, + resolve: { + alias: { + '@tanstack/ai/event-client': resolve( + __dirname, + '../ai/src/event-client.ts', + ), + }, + }, +}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c3ed56e5..72b29747 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -733,6 +733,31 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-preact: + dependencies: + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai + '@tanstack/ai-client': + specifier: workspace:* + version: link:../ai-client + devDependencies: + '@testing-library/preact': + specifier: ^3.2.4 + version: 3.2.4(preact@10.28.0) + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.15(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + jsdom: + specifier: ^27.2.0 + version: 27.3.0(postcss@8.5.6) + preact: + specifier: ^10.26.9 + version: 10.28.0 + vite: + specifier: ^7.2.7 + version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-react: dependencies: '@tanstack/ai': @@ -3432,6 +3457,16 @@ packages: resolution: {integrity: sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==} engines: {node: '>=18'} + '@testing-library/dom@8.20.1': + resolution: {integrity: sha512-/DiOQ5xBxgdYRC8LNk7U+RWat0S3qRLeIw3ZIkMQ9kkVlRmwD/Eg8k8CqIpD6GW7u20JIUOfMKbxtiLutpjQ4g==} + engines: {node: '>=12'} + + '@testing-library/preact@3.2.4': + resolution: {integrity: sha512-F+kJ243LP6VmEK1M809unzTE/ijg+bsMNuiRN0JEDIJBELKKDNhdgC/WrUSZ7klwJvtlO3wQZ9ix+jhObG07Fg==} + engines: {node: '>= 12'} + peerDependencies: + preact: '>=10 || ^10.0.0-alpha.0 || ^10.0.0-beta.0' + '@testing-library/react@16.3.0': resolution: {integrity: sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==} engines: {node: '>=18'} @@ -3972,6 +4007,9 @@ packages: argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + aria-query@5.1.3: + resolution: {integrity: 
sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==} + aria-query@5.3.0: resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==} @@ -3979,6 +4017,10 @@ packages: resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==} engines: {node: '>= 0.4'} + array-buffer-byte-length@1.0.2: + resolution: {integrity: sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==} + engines: {node: '>= 0.4'} + array-union@2.1.0: resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} engines: {node: '>=8'} @@ -4014,6 +4056,10 @@ packages: peerDependencies: postcss: ^8.1.0 + available-typed-arrays@1.0.7: + resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} + engines: {node: '>= 0.4'} + axios@1.13.2: resolution: {integrity: sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==} @@ -4152,6 +4198,10 @@ packages: resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} engines: {node: '>= 0.4'} + call-bind@1.0.8: + resolution: {integrity: sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==} + engines: {node: '>= 0.4'} + call-bound@1.0.4: resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} engines: {node: '>= 0.4'} @@ -4464,6 +4514,10 @@ packages: dedent-js@1.0.1: resolution: {integrity: sha512-OUepMozQULMLUmhxS95Vudo0jb0UchLimi3+pQ2plj61Fcy8axbP9hbiD4Sz6DPqn6XG3kfmziVfQ1rSys5AJQ==} + deep-equal@2.2.3: + resolution: {integrity: sha512-ZIwpnevOurS8bpT4192sqAowWM76JDKSHYzMLty3BZGSswgq6pBaH3DhCSW5xVAZICZyKdOBPjwww5wfgT/6PA==} + engines: {node: '>= 0.4'} + deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} @@ -4474,10 +4528,18 @@ packages: defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} + define-data-property@1.1.4: + resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} + engines: {node: '>= 0.4'} + define-lazy-prop@2.0.0: resolution: {integrity: sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==} engines: {node: '>=8'} + define-properties@1.2.1: + resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} + engines: {node: '>= 0.4'} + defu@6.1.4: resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} @@ -4655,6 +4717,9 @@ packages: resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} + es-get-iterator@1.1.3: + resolution: {integrity: sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==} + es-module-lexer@1.7.0: resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} @@ -4942,6 +5007,10 @@ packages: debug: optional: true + for-each@0.3.5: + resolution: 
{integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==} + engines: {node: '>= 0.4'} + foreground-child@3.3.1: resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} engines: {node: '>=14'} @@ -5001,6 +5070,9 @@ packages: function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + functions-have-names@1.2.3: + resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + gaxios@7.1.3: resolution: {integrity: sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ==} engines: {node: '>=18'} @@ -5121,10 +5193,17 @@ packages: resolution: {integrity: sha512-QsCdAUHAmiDeKeaNojb1OHOPF7NjcWPBR7obdu3NwH2a/oyQaLg5d0aaCy/9My6CdPChYF07dvz5chaXBGaD4g==} engines: {node: '>=20.0.0'} + has-bigints@1.1.0: + resolution: {integrity: sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==} + engines: {node: '>= 0.4'} + has-flag@4.0.0: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} + has-property-descriptors@1.0.2: + resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} + has-symbols@1.1.0: resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} engines: {node: '>= 0.4'} @@ -5289,6 +5368,10 @@ packages: inline-style-parser@0.2.7: resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} + internal-slot@1.1.0: + resolution: {integrity: sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==} + engines: {node: '>= 0.4'} + ioredis@5.8.2: resolution: {integrity: sha512-C6uC+kleiIMmjViJINWk80sOQw5lEzse1ZmvD+S/s8p8CWapftSaC+kocGTx6xrbrJ4WmYQGC08ffHLr6ToR6Q==} engines: {node: '>=12.22.0'} @@ -5306,14 +5389,38 @@ packages: is-alphanumerical@2.0.1: resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} + is-arguments@1.2.0: + resolution: {integrity: sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==} + engines: {node: '>= 0.4'} + + is-array-buffer@3.0.5: + resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==} + engines: {node: '>= 0.4'} + + is-bigint@1.1.0: + resolution: {integrity: sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==} + engines: {node: '>= 0.4'} + is-binary-path@2.1.0: resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} engines: {node: '>=8'} + is-boolean-object@1.2.2: + resolution: {integrity: sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==} + engines: {node: '>= 0.4'} + + is-callable@1.2.7: + resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} + engines: {node: '>= 0.4'} + is-core-module@2.16.1: resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} engines: {node: '>= 0.4'} 
+ is-date-object@1.1.0: + resolution: {integrity: sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==} + engines: {node: '>= 0.4'} + is-decimal@2.0.1: resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} @@ -5355,9 +5462,17 @@ packages: resolution: {integrity: sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==} engines: {node: '>=8'} + is-map@2.0.3: + resolution: {integrity: sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==} + engines: {node: '>= 0.4'} + is-module@1.0.0: resolution: {integrity: sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==} + is-number-object@1.1.1: + resolution: {integrity: sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==} + engines: {node: '>= 0.4'} + is-number@7.0.0: resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} @@ -5378,6 +5493,18 @@ packages: is-reference@3.0.3: resolution: {integrity: sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==} + is-regex@1.2.1: + resolution: {integrity: sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==} + engines: {node: '>= 0.4'} + + is-set@2.0.3: + resolution: {integrity: sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==} + engines: {node: '>= 0.4'} + + is-shared-array-buffer@1.0.4: + resolution: {integrity: sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==} + engines: {node: '>= 0.4'} + is-stream@2.0.1: resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} engines: {node: '>=8'} @@ -5386,14 +5513,30 @@ packages: resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + is-string@1.1.1: + resolution: {integrity: sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==} + engines: {node: '>= 0.4'} + is-subdir@1.2.0: resolution: {integrity: sha512-2AT6j+gXe/1ueqbW6fLZJiIw3F8iXGJtt0yDrZaBhAZEG1raiTxKWU+IPqMCzQAXOUCKdA4UDMgacKH25XG2Cw==} engines: {node: '>=4'} + is-symbol@1.1.1: + resolution: {integrity: sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==} + engines: {node: '>= 0.4'} + is-unicode-supported@0.1.0: resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==} engines: {node: '>=10'} + is-weakmap@2.0.2: + resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==} + engines: {node: '>= 0.4'} + + is-weakset@2.0.4: + resolution: {integrity: sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==} + engines: {node: '>= 0.4'} + is-what@4.1.16: resolution: {integrity: sha512-ZhMwEosbFJkA0YhFnNDgTM4ZxDRsS6HqTo7qsZM08fehyRYIYa0yHu5R6mgo1n/8MgaPBXiPimPD77baVFYg+A==} engines: {node: '>=12.13'} @@ -5417,6 +5560,9 @@ packages: isarray@1.0.0: resolution: {integrity: 
sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} + isarray@2.0.5: + resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} + isbot@5.1.32: resolution: {integrity: sha512-VNfjM73zz2IBZmdShMfAUg10prm6t7HFUQmNAEOAVS4YH92ZrZcvkMcGX6cIgBJAzWDzPent/EeAtYEHNPNPBQ==} engines: {node: '>=18'} @@ -6153,6 +6299,18 @@ packages: resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} engines: {node: '>= 0.4'} + object-is@1.1.6: + resolution: {integrity: sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==} + engines: {node: '>= 0.4'} + + object-keys@1.1.1: + resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} + engines: {node: '>= 0.4'} + + object.assign@4.1.7: + resolution: {integrity: sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==} + engines: {node: '>= 0.4'} + obug@2.1.1: resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} @@ -6352,6 +6510,10 @@ packages: engines: {node: '>=18'} hasBin: true + possible-typed-array-names@1.1.0: + resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} + engines: {node: '>= 0.4'} + postcss-value-parser@4.2.0: resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} @@ -6359,6 +6521,9 @@ packages: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} + preact@10.28.0: + resolution: {integrity: sha512-rytDAoiXr3+t6OIP3WGlDd0ouCUG1iCWzkcY3++Nreuoi17y6T5i/zRhe6uYfoVcxq6YU+sBtJouuRDsq8vvqA==} + prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -6533,6 +6698,10 @@ packages: resolution: {integrity: sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==} engines: {node: '>=4'} + regexp.prototype.flags@1.5.4: + resolution: {integrity: sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==} + engines: {node: '>= 0.4'} + rehype-highlight@7.0.2: resolution: {integrity: sha512-k158pK7wdC2qL3M5NcZROZ2tR/l7zOzjxXd5VGdcfIyoijjQqpHd3JKtYSBDpDZ38UI2WJWuFAtkMDxmx5kstA==} @@ -6667,6 +6836,10 @@ packages: safe-buffer@5.2.1: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + safe-regex-test@1.1.0: + resolution: {integrity: sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==} + engines: {node: '>= 0.4'} + safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} @@ -6739,6 +6912,14 @@ packages: set-cookie-parser@2.7.2: resolution: {integrity: sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==} + set-function-length@1.2.2: + resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==} + engines: {node: '>= 0.4'} + + set-function-name@2.0.2: + resolution: {integrity: 
sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==} + engines: {node: '>= 0.4'} + setprototypeof@1.2.0: resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} @@ -6907,6 +7088,10 @@ packages: std-env@3.10.0: resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + stop-iteration-iterator@1.1.0: + resolution: {integrity: sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==} + engines: {node: '>= 0.4'} + streamx@2.23.0: resolution: {integrity: sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==} @@ -7673,6 +7858,18 @@ packages: whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + which-boxed-primitive@1.1.1: + resolution: {integrity: sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==} + engines: {node: '>= 0.4'} + + which-collection@1.0.2: + resolution: {integrity: sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==} + engines: {node: '>= 0.4'} + + which-typed-array@1.1.19: + resolution: {integrity: sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==} + engines: {node: '>= 0.4'} + which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} @@ -10428,6 +10625,22 @@ snapshots: picocolors: 1.1.1 pretty-format: 27.5.1 + '@testing-library/dom@8.20.1': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/runtime': 7.28.4 + '@types/aria-query': 5.0.4 + aria-query: 5.1.3 + chalk: 4.1.2 + dom-accessibility-api: 0.5.16 + lz-string: 1.5.0 + pretty-format: 27.5.1 + + '@testing-library/preact@3.2.4(preact@10.28.0)': + dependencies: + '@testing-library/dom': 8.20.1 + preact: 10.28.0 + '@testing-library/react@16.3.0(@testing-library/dom@10.4.1)(@types/react-dom@19.2.3(@types/react@19.2.7))(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@babel/runtime': 7.28.4 @@ -11089,12 +11302,21 @@ snapshots: argparse@2.0.1: {} + aria-query@5.1.3: + dependencies: + deep-equal: 2.2.3 + aria-query@5.3.0: dependencies: dequal: 2.0.3 aria-query@5.3.2: {} + array-buffer-byte-length@1.0.2: + dependencies: + call-bound: 1.0.4 + is-array-buffer: 3.0.5 + array-union@2.1.0: {} assertion-error@2.0.1: {} @@ -11130,6 +11352,10 @@ snapshots: postcss: 8.5.6 postcss-value-parser: 4.2.0 + available-typed-arrays@1.0.7: + dependencies: + possible-typed-array-names: 1.1.0 + axios@1.13.2: dependencies: follow-redirects: 1.15.11 @@ -11291,6 +11517,13 @@ snapshots: es-errors: 1.3.0 function-bind: 1.1.2 + call-bind@1.0.8: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + get-intrinsic: 1.3.0 + set-function-length: 1.2.2 + call-bound@1.0.4: dependencies: call-bind-apply-helpers: 1.0.2 @@ -11556,6 +11789,27 @@ snapshots: dedent-js@1.0.1: {} + deep-equal@2.2.3: + dependencies: + array-buffer-byte-length: 1.0.2 + call-bind: 1.0.8 + es-get-iterator: 1.1.3 + get-intrinsic: 1.3.0 + is-arguments: 1.2.0 + is-array-buffer: 3.0.5 + is-date-object: 1.1.0 + is-regex: 1.2.1 + is-shared-array-buffer: 1.0.4 + isarray: 2.0.5 + object-is: 1.1.6 + object-keys: 1.1.1 + object.assign: 4.1.7 + regexp.prototype.flags: 1.5.4 + 
side-channel: 1.1.0 + which-boxed-primitive: 1.1.1 + which-collection: 1.0.2 + which-typed-array: 1.1.19 + deep-is@0.1.4: {} deepmerge@4.3.1: {} @@ -11564,8 +11818,20 @@ snapshots: dependencies: clone: 1.0.4 + define-data-property@1.1.4: + dependencies: + es-define-property: 1.0.1 + es-errors: 1.3.0 + gopd: 1.2.0 + define-lazy-prop@2.0.0: {} + define-properties@1.2.1: + dependencies: + define-data-property: 1.1.4 + has-property-descriptors: 1.0.2 + object-keys: 1.1.1 + defu@6.1.4: {} delayed-stream@1.0.0: {} @@ -11704,6 +11970,18 @@ snapshots: es-errors@1.3.0: {} + es-get-iterator@1.1.3: + dependencies: + call-bind: 1.0.8 + get-intrinsic: 1.3.0 + has-symbols: 1.1.0 + is-arguments: 1.2.0 + is-map: 2.0.3 + is-set: 2.0.3 + is-string: 1.1.1 + isarray: 2.0.5 + stop-iteration-iterator: 1.1.0 + es-module-lexer@1.7.0: {} es-object-atoms@1.1.1: @@ -12113,6 +12391,10 @@ snapshots: follow-redirects@1.15.11: {} + for-each@0.3.5: + dependencies: + is-callable: 1.2.7 + foreground-child@3.3.1: dependencies: cross-spawn: 7.0.6 @@ -12168,6 +12450,8 @@ snapshots: function-bind@1.1.2: {} + functions-have-names@1.2.3: {} + gaxios@7.1.3: dependencies: extend: 3.0.2 @@ -12338,8 +12622,14 @@ snapshots: '@types/whatwg-mimetype': 3.0.2 whatwg-mimetype: 3.0.0 + has-bigints@1.1.0: {} + has-flag@4.0.0: {} + has-property-descriptors@1.0.2: + dependencies: + es-define-property: 1.0.1 + has-symbols@1.1.0: {} has-tostringtag@1.0.2: @@ -12552,6 +12842,12 @@ snapshots: inline-style-parser@0.2.7: {} + internal-slot@1.1.0: + dependencies: + es-errors: 1.3.0 + hasown: 2.0.2 + side-channel: 1.1.0 + ioredis@5.8.2: dependencies: '@ioredis/commands': 1.4.0 @@ -12577,14 +12873,41 @@ snapshots: is-alphabetical: 2.0.1 is-decimal: 2.0.1 + is-arguments@1.2.0: + dependencies: + call-bound: 1.0.4 + has-tostringtag: 1.0.2 + + is-array-buffer@3.0.5: + dependencies: + call-bind: 1.0.8 + call-bound: 1.0.4 + get-intrinsic: 1.3.0 + + is-bigint@1.1.0: + dependencies: + has-bigints: 1.1.0 + is-binary-path@2.1.0: dependencies: binary-extensions: 2.3.0 + is-boolean-object@1.2.2: + dependencies: + call-bound: 1.0.4 + has-tostringtag: 1.0.2 + + is-callable@1.2.7: {} + is-core-module@2.16.1: dependencies: hasown: 2.0.2 + is-date-object@1.1.0: + dependencies: + call-bound: 1.0.4 + has-tostringtag: 1.0.2 + is-decimal@2.0.1: {} is-docker@2.2.1: {} @@ -12611,8 +12934,15 @@ snapshots: is-interactive@1.0.0: {} + is-map@2.0.3: {} + is-module@1.0.0: {} + is-number-object@1.1.1: + dependencies: + call-bound: 1.0.4 + has-tostringtag: 1.0.2 + is-number@7.0.0: {} is-plain-obj@4.1.0: {} @@ -12629,16 +12959,47 @@ snapshots: dependencies: '@types/estree': 1.0.8 + is-regex@1.2.1: + dependencies: + call-bound: 1.0.4 + gopd: 1.2.0 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + is-set@2.0.3: {} + + is-shared-array-buffer@1.0.4: + dependencies: + call-bound: 1.0.4 + is-stream@2.0.1: {} is-stream@3.0.0: {} + is-string@1.1.1: + dependencies: + call-bound: 1.0.4 + has-tostringtag: 1.0.2 + is-subdir@1.2.0: dependencies: better-path-resolve: 1.0.0 + is-symbol@1.1.1: + dependencies: + call-bound: 1.0.4 + has-symbols: 1.1.0 + safe-regex-test: 1.1.0 + is-unicode-supported@0.1.0: {} + is-weakmap@2.0.2: {} + + is-weakset@2.0.4: + dependencies: + call-bound: 1.0.4 + get-intrinsic: 1.3.0 + is-what@4.1.16: {} is-windows@1.0.2: {} @@ -12657,6 +13018,8 @@ snapshots: isarray@1.0.0: {} + isarray@2.0.5: {} + isbot@5.1.32: {} isexe@2.0.0: {} @@ -13693,6 +14056,22 @@ snapshots: object-inspect@1.13.4: {} + object-is@1.1.6: + dependencies: + call-bind: 1.0.8 + define-properties: 1.2.1 + + 
object-keys@1.1.1: {} + + object.assign@4.1.7: + dependencies: + call-bind: 1.0.8 + call-bound: 1.0.4 + define-properties: 1.2.1 + es-object-atoms: 1.1.1 + has-symbols: 1.1.0 + object-keys: 1.1.1 + obug@2.1.1: {} ofetch@1.5.1: @@ -13905,6 +14284,8 @@ snapshots: optionalDependencies: fsevents: 2.3.2 + possible-typed-array-names@1.1.0: {} + postcss-value-parser@4.2.0: {} postcss@8.5.6: @@ -13913,6 +14294,8 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + preact@10.28.0: {} + prelude-ls@1.2.1: {} premove@4.0.0: {} @@ -14088,6 +14471,15 @@ snapshots: dependencies: redis-errors: 1.2.0 + regexp.prototype.flags@1.5.4: + dependencies: + call-bind: 1.0.8 + define-properties: 1.2.1 + es-errors: 1.3.0 + get-proto: 1.0.1 + gopd: 1.2.0 + set-function-name: 2.0.2 + rehype-highlight@7.0.2: dependencies: '@types/hast': 3.0.4 @@ -14280,6 +14672,12 @@ snapshots: safe-buffer@5.2.1: {} + safe-regex-test@1.1.0: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + is-regex: 1.2.1 + safer-buffer@2.1.2: {} saxes@6.0.0: @@ -14372,6 +14770,22 @@ snapshots: set-cookie-parser@2.7.2: {} + set-function-length@1.2.2: + dependencies: + define-data-property: 1.1.4 + es-errors: 1.3.0 + function-bind: 1.1.2 + get-intrinsic: 1.3.0 + gopd: 1.2.0 + has-property-descriptors: 1.0.2 + + set-function-name@2.0.2: + dependencies: + define-data-property: 1.1.4 + es-errors: 1.3.0 + functions-have-names: 1.2.3 + has-property-descriptors: 1.0.2 + setprototypeof@1.2.0: {} shebang-command@2.0.0: @@ -14529,6 +14943,11 @@ snapshots: std-env@3.10.0: {} + stop-iteration-iterator@1.1.0: + dependencies: + es-errors: 1.3.0 + internal-slot: 1.1.0 + streamx@2.23.0: dependencies: events-universal: 1.0.1 @@ -15434,6 +15853,31 @@ snapshots: tr46: 0.0.3 webidl-conversions: 3.0.1 + which-boxed-primitive@1.1.1: + dependencies: + is-bigint: 1.1.0 + is-boolean-object: 1.2.2 + is-number-object: 1.1.1 + is-string: 1.1.1 + is-symbol: 1.1.1 + + which-collection@1.0.2: + dependencies: + is-map: 2.0.3 + is-set: 2.0.3 + is-weakmap: 2.0.2 + is-weakset: 2.0.4 + + which-typed-array@1.1.19: + dependencies: + available-typed-arrays: 1.0.7 + call-bind: 1.0.8 + call-bound: 1.0.4 + for-each: 0.3.5 + get-proto: 1.0.1 + gopd: 1.2.0 + has-tostringtag: 1.0.2 + which@2.0.2: dependencies: isexe: 2.0.0 From 8c8fbbd40447aae363d54a1f72cb86eb277852bc Mon Sep 17 00:00:00 2001 From: jdecroock Date: Tue, 23 Dec 2025 12:42:22 +0100 Subject: [PATCH 2/4] Make minimum release 10.11 as useId was introduced there --- packages/typescript/ai-preact/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/typescript/ai-preact/package.json b/packages/typescript/ai-preact/package.json index ffcb66d8..b13b654e 100644 --- a/packages/typescript/ai-preact/package.json +++ b/packages/typescript/ai-preact/package.json @@ -52,6 +52,6 @@ }, "peerDependencies": { "@tanstack/ai": "workspace:^", - "preact": ">=10.0.0" + "preact": ">=10.11.0" } } From f6fe95080bbcb16969f358c6a0708521e47ae9ed Mon Sep 17 00:00:00 2001 From: jdecroock Date: Tue, 23 Dec 2025 12:47:49 +0100 Subject: [PATCH 3/4] Correct readme --- packages/typescript/ai-preact/README.md | 26 ------------------------- 1 file changed, 26 deletions(-) diff --git a/packages/typescript/ai-preact/README.md b/packages/typescript/ai-preact/README.md index f0553d37..130322f2 100644 --- a/packages/typescript/ai-preact/README.md +++ b/packages/typescript/ai-preact/README.md @@ -72,37 +72,11 @@ for await (const chunk of result) { Available adapters: `openaiText`, `openaiEmbed`, `openaiSummarize`, `anthropicText`, 
`geminiText`, `ollamaText`, and more. -## Bonus: TanStack Start Integration - -TanStack AI works with **any** framework (Next.js, Express, Remix, etc.). - -**With TanStack Start**, you get a bonus: share implementations between AI tools and server functions with `createServerFnTool`: - -```typescript -import { createServerFnTool } from '@tanstack/ai-preact' - -// Define once, get AI tool AND server function (TanStack Start only) -const getProducts = createServerFnTool({ - name: 'getProducts', - inputSchema: z.object({ query: z.string() }), - execute: async ({ query }) => db.products.search(query), -}) - -// Use in AI chat -chat({ tools: [getProducts.server] }) - -// Call directly from components (no API endpoint needed!) -const products = await getProducts.serverFn({ query: 'laptop' }) -``` - -No duplicate logic, full type safety, automatic validation. The `serverFn` feature requires TanStack Start. See [docs](https://tanstack.com/ai) for details. - ## Get Involved - We welcome issues and pull requests! - Participate in [GitHub discussions](https://github.com/TanStack/ai/discussions) - Chat with the community on [Discord](https://discord.com/invite/WrRKjPJ) -- See [CONTRIBUTING.md](./CONTRIBUTING.md) for setup instructions ## Partners From 07890c77a2b87418d24b2fe99fd55332a8863511 Mon Sep 17 00:00:00 2001 From: jdecroock Date: Tue, 23 Dec 2025 12:55:35 +0100 Subject: [PATCH 4/4] Add docs page --- docs/api/ai-preact.md | 317 ++++++++++++++++++++++++++++++++++++++++++ docs/config.json | 4 + 2 files changed, 321 insertions(+) create mode 100644 docs/api/ai-preact.md diff --git a/docs/api/ai-preact.md b/docs/api/ai-preact.md new file mode 100644 index 00000000..46dc1fa9 --- /dev/null +++ b/docs/api/ai-preact.md @@ -0,0 +1,317 @@ +--- +title: "@tanstack/ai-preact" +slug: /api/ai-preact +order: 5 +--- + +Preact hooks for TanStack AI, providing convenient Preact bindings for the headless client. + +## Installation + +```bash +npm install @tanstack/ai-preact +``` + +## `useChat(options?)` + +Main hook for managing chat state in Preact with full type safety. + +```typescript +import { useChat, fetchServerSentEvents } from "@tanstack/ai-preact"; +import { + clientTools, + createChatClientOptions, + type InferChatMessages +} from "@tanstack/ai-client"; + +function ChatComponent() { + // Create client tool implementations + const updateUI = updateUIDef.client((input) => { + setNotification(input.message); + return { success: true }; + }); + + // Create typed tools array (no 'as const' needed!) + const tools = clientTools(updateUI); + + const chatOptions = createChatClientOptions({ + connection: fetchServerSentEvents("/api/chat"), + tools, + }); + + // Fully typed messages! + type ChatMessages = InferChatMessages; + + const { messages, sendMessage, isLoading, error, addToolApprovalResponse } = + useChat(chatOptions); + + return
<div>{/* Chat UI with typed messages */}</div>
; +} +``` + +### Options + +Extends `ChatClientOptions` from `@tanstack/ai-client`: + +- `connection` - Connection adapter (required) +- `tools?` - Array of client tool implementations (with `.client()` method) +- `initialMessages?` - Initial messages array +- `id?` - Unique identifier for this chat instance +- `body?` - Additional body parameters to send +- `onResponse?` - Callback when response is received +- `onChunk?` - Callback when stream chunk is received +- `onFinish?` - Callback when response finishes +- `onError?` - Callback when error occurs +- `streamProcessor?` - Stream processing configuration + +**Note:** Client tools are now automatically executed - no `onToolCall` callback needed! + +### Returns + +```typescript +interface UseChatReturn { + messages: UIMessage[]; + sendMessage: (content: string) => Promise; + append: (message: ModelMessage | UIMessage) => Promise; + addToolResult: (result: { + toolCallId: string; + tool: string; + output: any; + state?: "output-available" | "output-error"; + errorText?: string; + }) => Promise; + addToolApprovalResponse: (response: { + id: string; + approved: boolean; + }) => Promise; + reload: () => Promise; + stop: () => void; + isLoading: boolean; + error: Error | undefined; + setMessages: (messages: UIMessage[]) => void; + clear: () => void; +} +``` + +## Connection Adapters + +Re-exported from `@tanstack/ai-client` for convenience: + +```typescript +import { + fetchServerSentEvents, + fetchHttpStream, + stream, + type ConnectionAdapter, +} from "@tanstack/ai-preact"; +``` + +## Example: Basic Chat + +```typescript +import { useState } from "preact/hooks"; +import { useChat, fetchServerSentEvents } from "@tanstack/ai-preact"; + +export function Chat() { + const [input, setInput] = useState(""); + + const { messages, sendMessage, isLoading } = useChat({ + connection: fetchServerSentEvents("/api/chat"), + }); + + const handleSubmit = (e) => { + e.preventDefault(); + if (input.trim() && !isLoading) { + sendMessage(input); + setInput(""); + } + }; + + return ( +
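+    // messages comes from useChat; each entry's parts array may hold text, thinking, or tool-call parts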
+    <form onSubmit={handleSubmit}>
+      <div>
+        {messages.map((message) => (
+          <div>
+            {message.role}:
+            {message.parts.map((part, idx) => {
+              if (part.type === "thinking") {
+                return (
+                  <div key={idx}>
+                    💭 Thinking: {part.content}
+                  </div>
+                );
+              }
+              if (part.type === "text") {
+                return <span key={idx}>{part.content}</span>;
+              }
+              return null;
+            })}
+          </div>
+        ))}
+      </div>
+      <div>
+        <input
+          value={input}
+          onInput={(e) => setInput(e.currentTarget.value)}
+          disabled={isLoading}
+        />
+        <button type="submit" disabled={isLoading}>
+          Send
+        </button>
+      </div>
+    </form>
+ ); +} +``` + +## Example: Tool Approval + +```typescript +import { useChat, fetchServerSentEvents } from "@tanstack/ai-preact"; + +export function ChatWithApproval() { + const { messages, sendMessage, addToolApprovalResponse } = useChat({ + connection: fetchServerSentEvents("/api/chat"), + }); + + return ( +
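+    // A tool call paused for user approval has state "approval-requested" and carries an approval object with an id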
+    <div>
+      {messages.map((message) =>
+        message.parts.map((part) => {
+          if (
+            part.type === "tool-call" &&
+            part.state === "approval-requested" &&
+            part.approval
+          ) {
+            return (
+              <div key={part.approval.id}>
+                <div>Approve: {part.name}</div>
+                <button onClick={() => addToolApprovalResponse({ id: part.approval.id, approved: true })}>
+                  Approve
+                </button>
+                <button onClick={() => addToolApprovalResponse({ id: part.approval.id, approved: false })}>
+                  Deny
+                </button>
+              </div>
+            );
+          }
+          return null;
+        })
+      )}
+    </div>
+ ); +} +``` + +## Example: Client Tools with Type Safety + +```typescript +import { useChat, fetchServerSentEvents } from "@tanstack/ai-preact"; +import { + clientTools, + createChatClientOptions, + type InferChatMessages +} from "@tanstack/ai-client"; +import { updateUIDef, saveToStorageDef } from "./tool-definitions"; +import { useState } from "preact/hooks"; + +export function ChatWithClientTools() { + const [notification, setNotification] = useState(null); + + // Create client implementations + const updateUI = updateUIDef.client((input) => { + // ✅ input is fully typed! + setNotification({ message: input.message, type: input.type }); + return { success: true }; + }); + + const saveToStorage = saveToStorageDef.client((input) => { + localStorage.setItem(input.key, input.value); + return { saved: true }; + }); + + // Create typed tools array (no 'as const' needed!) + const tools = clientTools(updateUI, saveToStorage); + + const { messages, sendMessage } = useChat({ + connection: fetchServerSentEvents("/api/chat"), + tools, // ✅ Automatic execution, full type safety + }); + + return ( +
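+    // Client tools execute automatically; their calls still surface as typed tool-call parts for rendering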
+    <div>
+      {messages.map((message) =>
+        message.parts.map((part) => {
+          if (part.type === "tool-call" && part.name === "updateUI") {
+            // ✅ part.input and part.output are fully typed!
+            return <div>Tool executed: {part.name}</div>;
+          }
+        })
+      )}
+    </div>
+ ); +} +``` + +## `createChatClientOptions(options)` + +Helper to create typed chat options (re-exported from `@tanstack/ai-client`). + +```typescript +import { + clientTools, + createChatClientOptions, + type InferChatMessages +} from "@tanstack/ai-client"; + +// Create typed tools array (no 'as const' needed!) +const tools = clientTools(tool1, tool2); + +const chatOptions = createChatClientOptions({ + connection: fetchServerSentEvents("/api/chat"), + tools, +}); + +type Messages = InferChatMessages; +``` + +## Types + +Re-exported from `@tanstack/ai-client`: + +- `UIMessage` - Message type with tool type parameter +- `MessagePart` - Message part with tool type parameter +- `TextPart` - Text content part +- `ThinkingPart` - Thinking content part +- `ToolCallPart` - Tool call part (discriminated union) +- `ToolResultPart` - Tool result part +- `ChatClientOptions` - Chat client options +- `ConnectionAdapter` - Connection adapter interface +- `InferChatMessages` - Extract message type from options + +Re-exported from `@tanstack/ai`: + +- `toolDefinition()` - Create isomorphic tool definition +- `ToolDefinitionInstance` - Tool definition type +- `ClientTool` - Client tool type +- `ServerTool` - Server tool type + +## Next Steps + +- [Getting Started](../getting-started/quick-start) - Learn the basics +- [Tools Guide](../guides/tools) - Learn about the isomorphic tool system +- [Client Tools](../guides/client-tools) - Learn about client-side tools diff --git a/docs/config.json b/docs/config.json index 375489c0..0cd7d12b 100644 --- a/docs/config.json +++ b/docs/config.json @@ -115,6 +115,10 @@ "label": "@tanstack/ai-react", "to": "api/ai-react" }, + { + "label": "@tanstack/ai-preact", + "to": "api/ai-preact" + }, { "label": "@tanstack/ai-solid", "to": "api/ai-solid"