From 2040f865c189be7cc473df037f7ca1c040a0d978 Mon Sep 17 00:00:00 2001
From: yousefed
Date: Tue, 17 Jun 2025 15:39:58 +0200
Subject: [PATCH] custom ai executor

---
 examples/09-ai/01-minimal/App.tsx                  |  10 +-
 packages/xl-ai/src/AIExtension.ts                  |  25 +++-
 packages/xl-ai/src/api/LLMRequest.ts               | 110 ++++++++++--------
 packages/xl-ai/src/api/LLMResponse.ts              |  43 +++++++
 packages/xl-ai/src/api/index.ts                    |   1 +
 .../src/streamTool/callLLMWithStreamTools.ts       |  40 +++++++
 6 files changed, 176 insertions(+), 53 deletions(-)

diff --git a/examples/09-ai/01-minimal/App.tsx b/examples/09-ai/01-minimal/App.tsx
index 330538618..e793ba789 100644
--- a/examples/09-ai/01-minimal/App.tsx
+++ b/examples/09-ai/01-minimal/App.tsx
@@ -7,9 +7,9 @@ import "@blocknote/mantine/style.css";
 import {
   FormattingToolbar,
   FormattingToolbarController,
-  SuggestionMenuController,
   getDefaultReactSlashMenuItems,
   getFormattingToolbarItems,
+  SuggestionMenuController,
   useCreateBlockNote,
 } from "@blocknote/react";
 import {
@@ -64,6 +64,14 @@ export default function App() {
     extensions: [
       createAIExtension({
         model,
+        /*
+        executor: async (opts) => {
+          // fetch data
+          const resp = await fetch(opts);
+          // process to stream tool calls
+          const streamToolCalls = await yourLogicToConvertRespToStreamToolCalls(opts);
+          return LLMResponse.fromArray(opts.messages, opts.streamTools, streamToolCalls);
+        }, */
       }),
     ],
     // We set some initial content for demo purposes
diff --git a/packages/xl-ai/src/AIExtension.ts b/packages/xl-ai/src/AIExtension.ts
index e4ba5d684..9922e23c7 100644
--- a/packages/xl-ai/src/AIExtension.ts
+++ b/packages/xl-ai/src/AIExtension.ts
@@ -9,15 +9,19 @@ import {
   suggestChanges,
 } from "@blocknote/prosemirror-suggest-changes";
 import { APICallError, LanguageModel, RetryError } from "ai";
+import { Fragment, Slice } from "prosemirror-model";
 import { Plugin, PluginKey } from "prosemirror-state";
 import { fixTablesKey } from "prosemirror-tables";
 import { createStore, StoreApi } from "zustand/vanilla";
-import { doLLMRequest, LLMRequestOptions } from "./api/LLMRequest.js";
+import {
+  doLLMRequest,
+  ExecuteLLMRequestOptions,
+  LLMRequestOptions,
+} from "./api/LLMRequest.js";
 import { LLMResponse } from "./api/LLMResponse.js";
 import { PromptBuilder } from "./api/formats/PromptBuilder.js";
 import { LLMFormat, llmFormats } from "./api/index.js";
 import { createAgentCursorPlugin } from "./plugins/AgentCursorPlugin.js";
-import { Fragment, Slice } from "prosemirror-model";
 
 type MakeOptional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
 
@@ -81,6 +85,13 @@ type GlobalLLMRequestOptions = {
    * @default the default prompt builder for the selected {@link dataFormat}
    */
   promptBuilder?: PromptBuilder;
+
+  /**
+   * Customize how your LLM backend is called.
+   * Implement this function if you want to call a backend that is not compatible with
+   * the Vercel AI SDK.
+   */
+  executor?: (opts: ExecuteLLMRequestOptions) => Promise<LLMResponse>;
 };
 
 const PLUGIN_KEY = new PluginKey(`blocknote-ai-plugin`);
@@ -112,7 +123,10 @@ export class AIExtension extends BlockNoteExtension {
   public readonly options: ReturnType<
     ReturnType<
       typeof createStore<
-        MakeOptional<Required<GlobalLLMRequestOptions>, "promptBuilder">
+        MakeOptional<
+          Required<GlobalLLMRequestOptions>,
+          "promptBuilder" | "executor"
+        >
       >
     >
   >;
@@ -134,7 +148,10 @@ export class AIExtension extends BlockNoteExtension {
     super();
 
     this.options = createStore<
-      MakeOptional<Required<GlobalLLMRequestOptions>, "promptBuilder">
+      MakeOptional<
+        Required<GlobalLLMRequestOptions>,
+        "promptBuilder" | "executor"
+      >
     >()((_set) => ({
       dataFormat: llmFormats.html,
       stream: true,
diff --git a/packages/xl-ai/src/api/LLMRequest.ts b/packages/xl-ai/src/api/LLMRequest.ts
index c11a19d62..35e8c3825 100644
--- a/packages/xl-ai/src/api/LLMRequest.ts
+++ b/packages/xl-ai/src/api/LLMRequest.ts
@@ -1,23 +1,40 @@
 import { BlockNoteEditor } from "@blocknote/core";
 import { CoreMessage, generateObject, LanguageModelV1, streamObject } from "ai";
-import {
-  generateOperations,
-  streamOperations,
-} from "../streamTool/callLLMWithStreamTools.js";
+import { createAISDKLLMRequestExecutor } from "../streamTool/callLLMWithStreamTools.js";
+import { StreamTool } from "../streamTool/streamTool.js";
 import { isEmptyParagraph } from "../util/emptyBlock.js";
 import { LLMResponse } from "./LLMResponse.js";
 import type { PromptBuilder } from "./formats/PromptBuilder.js";
 import { htmlBlockLLMFormat } from "./formats/html-blocks/htmlBlocks.js";
 import { LLMFormat } from "./index.js";
 
+type MakeOptional<T, K extends keyof T> = Omit<T, K> & Partial<Pick<T, K>>;
+
+export type ExecuteLLMRequestOptions = {
+  messages: CoreMessage[];
+  streamTools: StreamTool<any>[];
+  llmRequestOptions: MakeOptional<LLMRequestOptions, "model">;
+  onStart?: () => void;
+};
+
 export type LLMRequestOptions = {
   /**
    * The language model to use for the LLM call (AI SDK)
    *
    * (when invoking `callLLM` via the `AIExtension` this will default to the
    * model set in the `AIExtension` options)
+   *
+   * Note: perhaps we want to remove this
    */
-  model: LanguageModelV1;
+  model?: LanguageModelV1;
+
+  /**
+   * Customize how your LLM backend is called.
+   * Implement this function if you want to call a backend that is not compatible with
+   * the Vercel AI SDK.
+   */
+  executor?: (opts: ExecuteLLMRequestOptions) => Promise<LLMResponse>;
+
   /**
    * The user prompt to use for the LLM call
    */
@@ -43,12 +60,6 @@ export type LLMRequestOptions = {
    * @default provided by the format (e.g. `llm.html.defaultPromptBuilder`)
    */
   promptBuilder?: PromptBuilder;
-  /**
-   * The maximum number of retries for the LLM call
-   *
-   * @default 2
-   */
-  maxRetries?: number;
   /**
    * Whether to use the editor selection for the LLM call
    *
@@ -68,15 +79,6 @@ export type LLMRequestOptions = {
     /** Enable the delete tool (default: true) */
     delete?: boolean;
   };
-  /**
-   * Whether to stream the LLM response or not
-   *
-   * When streaming, we use the AI SDK `streamObject` function,
-   * otherwise, we use the AI SDK `generateObject` function.
-   *
-   * @default true
-   */
-  stream?: boolean;
   /**
    * If the user's cursor is in an empty paragraph, automatically delete it when the AI
    * is starting to write.
@@ -102,6 +104,26 @@ export type LLMRequestOptions = {
    * @default true
    */
   withDelays?: boolean;
+
+  // The settings below might make more sense to be part of the executor
+
+  /**
+   * Whether to stream the LLM response or not
+   *
+   * When streaming, we use the AI SDK `streamObject` function,
+   * otherwise, we use the AI SDK `generateObject` function.
+   *
+   * @default true
+   */
+  stream?: boolean;
+
+  /**
+   * The maximum number of retries for the LLM call
+   *
+   * @default 2
+   */
+  maxRetries?: number;
+
   /**
    * Additional options to pass to the AI SDK `generateObject` function
    * (only used when `stream` is `false`)
@@ -217,34 +239,26 @@ export async function doLLMRequest(
     opts.onBlockUpdate,
   );
 
-  let response:
-    | Awaited<ReturnType<typeof streamOperations<any>>>
-    | Awaited<ReturnType<typeof generateOperations<any>>>;
-
-  if (stream) {
-    response = await streamOperations(
-      streamTools,
-      {
-        messages,
-        ...rest,
-      },
-      () => {
-        if (deleteCursorBlock) {
-          editor.removeBlocks([deleteCursorBlock]);
-        }
-        onStart?.();
-      },
-    );
-  } else {
-    response = await generateOperations(streamTools, {
-      messages,
-      ...rest,
-    });
-    if (deleteCursorBlock) {
-      editor.removeBlocks([deleteCursorBlock]);
+  let executor = opts.executor;
+  if (!executor) {
+    if (!opts.model) {
+      throw new Error("model is required when no executor is provided");
     }
-    onStart?.();
+    executor = createAISDKLLMRequestExecutor({ model: opts.model });
   }
-
-  return new LLMResponse(messages, response, streamTools);
+  return executor({
+    onStart: () => {
+      if (deleteCursorBlock) {
+        editor.removeBlocks([deleteCursorBlock]);
+      }
+      onStart?.();
+    },
+    messages,
+    streamTools,
+    llmRequestOptions: {
+      ...opts,
+      ...rest,
+      stream,
+    },
+  });
 }
diff --git a/packages/xl-ai/src/api/LLMResponse.ts b/packages/xl-ai/src/api/LLMResponse.ts
index 1321ab5b9..85804376e 100644
--- a/packages/xl-ai/src/api/LLMResponse.ts
+++ b/packages/xl-ai/src/api/LLMResponse.ts
@@ -1,6 +1,7 @@
 import { CoreMessage } from "ai";
 import { OperationsResult } from "../streamTool/callLLMWithStreamTools.js";
 import { StreamTool, StreamToolCall } from "../streamTool/streamTool.js";
+import { createAsyncIterableStreamFromAsyncIterable } from "../util/stream.js";
 
 /**
  * Result of an LLM call with stream tools that apply changes to a BlockNote Editor
@@ -61,4 +62,46 @@ export class LLMResponse {
       console.log(JSON.stringify(toolCall, null, 2));
     }
   }
+
+  /**
+   * Create an LLMResponse from an array of operations.
+   *
+   * Note: This is a temporary helper; we'll make it easier to create this from streaming data if required
+   */
+  public static fromArray<T extends StreamTool<any>[]>(
+    messages: CoreMessage[],
+    streamTools: StreamTool<any>[],
+    operations: StreamToolCall<T>[],
+  ): LLMResponse {
+    return new LLMResponse(
+      messages,
+      OperationsResultFromArray(operations),
+      streamTools,
+    );
+  }
 }
+
+function OperationsResultFromArray<T extends StreamTool<any>[]>(
+  operations: StreamToolCall<T>[],
+): OperationsResult<T> {
+  async function* singleChunkGenerator() {
+    for (const op of operations) {
+      yield {
+        operation: op,
+        isUpdateToPreviousOperation: false,
+        isPossiblyPartial: false,
+      };
+    }
+  }
+
+  return {
+    streamObjectResult: undefined,
+    generateObjectResult: undefined,
+    get operationsSource() {
+      return createAsyncIterableStreamFromAsyncIterable(singleChunkGenerator());
+    },
+    async getGeneratedOperations() {
+      return { operations };
+    },
+  };
+}
diff --git a/packages/xl-ai/src/api/index.ts b/packages/xl-ai/src/api/index.ts
index fef3fbc23..9a8412793 100644
--- a/packages/xl-ai/src/api/index.ts
+++ b/packages/xl-ai/src/api/index.ts
@@ -42,4 +42,5 @@ export const llmFormats = {
 };
 
 export { doLLMRequest as callLLM } from "./LLMRequest.js";
+export { LLMResponse } from "./LLMResponse.js";
 export { promptHelpers } from "./promptHelpers/index.js";
diff --git a/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts b/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts
index 78f7dd008..ae933692d 100644
--- a/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts
+++ b/packages/xl-ai/src/streamTool/callLLMWithStreamTools.ts
@@ -2,6 +2,7 @@ import {
   CoreMessage,
   GenerateObjectResult,
   LanguageModel,
+  LanguageModelV1,
   ObjectStreamPart,
   StreamObjectResult,
   generateObject,
@@ -11,6 +12,8 @@ import {
 
 import { createStreamToolsArraySchema } from "./jsonSchema.js";
 
+import { ExecuteLLMRequestOptions } from "../api/LLMRequest.js";
+import { LLMResponse } from "../api/LLMResponse.js";
 import {
   AsyncIterableStream,
   createAsyncIterableStream,
@@ -350,3 +353,40 @@ function partialObjectStream(
     ),
   );
 }
+
+export function createAISDKLLMRequestExecutor(opts: {
+  model: LanguageModelV1;
+}) {
+  const { model } = opts;
+  return async (opts: ExecuteLLMRequestOptions) => {
+    const { messages, streamTools, llmRequestOptions, onStart } = opts;
+    const { stream, maxRetries, _generateObjectOptions, _streamObjectOptions } =
+      llmRequestOptions;
+    let response:
+      | Awaited<ReturnType<typeof streamOperations<any>>>
+      | Awaited<ReturnType<typeof generateOperations<any>>>;
+
+    if (stream) {
+      response = await streamOperations(
+        streamTools,
+        {
+          messages,
+          model,
+          maxRetries,
+          ...(_streamObjectOptions as any),
+        },
+        onStart,
+      );
+    } else {
+      response = await generateOperations(streamTools, {
+        messages,
+        model,
+        maxRetries,
+        ...(_generateObjectOptions as any),
+      });
+      onStart?.();
+    }
+
+    return new LLMResponse(messages, response, streamTools);
+  };
+}
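Below is a rough, non-authoritative sketch of what the commented-out `executor` from the App.tsx example could look like once filled in. Everything backend-specific is hypothetical: the `/api/llm` endpoint, its request payload, and the `toStreamToolCalls` helper are stand-ins for your own server and parsing logic, and the root import of `LLMResponse` assumes the package re-exports the api index updated in this patch. Only `createAIExtension`, the `executor` option, and `LLMResponse.fromArray` come from this change.

```ts
import type { LanguageModelV1 } from "ai";
import { createAIExtension, LLMResponse } from "@blocknote/xl-ai";

// Assumed to be configured elsewhere in the app, as in the App.tsx example above.
declare const model: LanguageModelV1;

// Hypothetical helper: convert your backend's JSON reply into an array of
// stream tool calls (add / update / delete operations) matching `opts.streamTools`.
declare function toStreamToolCalls(json: unknown): any[];

const aiExtension = createAIExtension({
  model,
  executor: async (opts) => {
    // Forward the prompt messages built by BlockNote to a backend that is not
    // compatible with the Vercel AI SDK.
    const resp = await fetch("/api/llm", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ messages: opts.messages }),
    });
    if (!resp.ok) {
      throw new Error(`LLM backend returned ${resp.status}`);
    }

    // Let the extension know the request has started (e.g. so it can remove
    // the empty cursor block before the AI starts writing).
    opts.onStart?.();

    // Wrap the parsed operations in an LLMResponse using the fromArray helper
    // introduced in this patch.
    const operations = toStreamToolCalls(await resp.json());
    return LLMResponse.fromArray(opts.messages, opts.streamTools, operations);
  },
});
```

Because the executor receives the already-built `messages` and `streamTools`, the backend only has to return operations that match the stream tool schema; it never needs to speak the AI SDK protocol, which is the gap the `executor` option is meant to close.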