diff --git a/apps/docs/docs.json b/apps/docs/docs.json
index 9bd65c8c..54a17336 100644
--- a/apps/docs/docs.json
+++ b/apps/docs/docs.json
@@ -174,7 +174,7 @@
 			},
 			{
 				"group": "Supermemory MCP",
-				"pages": ["mcp", "supermemory-mcp/setup"]
+				"pages": ["supermemory-mcp/mcp", "supermemory-mcp/setup"]
 			},
 			{
 				"group": "Migration Guides",
diff --git a/apps/docs/supermemory-mcp/introduction.mdx b/apps/docs/supermemory-mcp/introduction.mdx
index 2e6f3826..51f097ed 100644
--- a/apps/docs/supermemory-mcp/introduction.mdx
+++ b/apps/docs/supermemory-mcp/introduction.mdx
@@ -5,7 +5,7 @@ description: 'Give your AI assistants persistent memory with the Model Context P
 Supermemory MCP Server 4.0 is a lightweight component that gives AI assistants persistent memory across conversations. It serves as a universal memory layer enabling Large Language Models (LLMs) to maintain context and memories across different applications and sessions, solving the fundamental limitation of AI assistants forgetting everything between conversations.
 
-<Card href="/mcp">
+<Card href="/supermemory-mcp/mcp">
   Jump to installation and setup
 </Card>
diff --git a/apps/docs/mcp.mdx b/apps/docs/supermemory-mcp/mcp.mdx
similarity index 100%
rename from apps/docs/mcp.mdx
rename to apps/docs/supermemory-mcp/mcp.mdx
diff --git a/packages/tools/README.md b/packages/tools/README.md
index 0b08f654..c0308141 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -217,6 +217,41 @@ const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", {
 })
 ```
 
+#### Custom Prompt Templates
+
+Customize how memories are formatted and injected into the system prompt using the `promptTemplate` option. This is useful for:
+- Using XML-based prompting (e.g., for Claude models)
+- Custom branding (removing "supermemories" references)
+- Controlling how your agent describes where information comes from
+
+```typescript
+import { generateText } from "ai"
+import { withSupermemory, type MemoryPromptData } from "@supermemory/tools/ai-sdk"
+import { openai } from "@ai-sdk/openai"
+
+const customPrompt = (data: MemoryPromptData) => `
+<memories>
+Here is some information about your past conversations with the user:
+${data.userMemories}
+${data.generalSearchMemories}
+</memories>
+`.trim()
+
+const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), "user-123", {
+	mode: "full",
+	promptTemplate: customPrompt,
+})
+
+const result = await generateText({
+	model: modelWithCustomPrompt,
+	messages: [{ role: "user", content: "What do you know about me?" }],
+})
+```
+
+The `MemoryPromptData` object provides:
+- `userMemories`: Pre-formatted markdown combining static profile facts (name, preferences, goals) and dynamic context (current projects, recent interests)
+- `generalSearchMemories`: Pre-formatted search results based on semantic similarity to the current query
+
 ### OpenAI SDK Usage
 
 #### OpenAI Middleware with Supermemory
diff --git a/packages/tools/package.json b/packages/tools/package.json
index dd8a1d00..346a3e40 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
 {
 	"name": "@supermemory/tools",
 	"type": "module",
-	"version": "1.3.65",
+	"version": "1.3.66",
 	"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
 	"scripts": {
 		"build": "tsdown",
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index 0eedc753..beeef093 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -10,14 +10,46 @@ import {
 	extractAssistantResponseText,
 	saveMemoryAfterResponse,
 } from "./middleware"
+import type { PromptTemplate, MemoryPromptData } from "./memory-prompt"
 
 interface WrapVercelLanguageModelOptions {
+	/** Optional conversation ID to group messages for contextual memory generation */
 	conversationId?: string
+	/** Enable detailed logging of memory search and injection */
 	verbose?: boolean
+	/**
+	 * Memory retrieval mode:
+	 * - "profile": Retrieves user profile memories (static + dynamic) without query filtering
+	 * - "query": Searches memories based on semantic similarity to the user's message
+	 * - "full": Combines both profile and query-based results
+	 */
 	mode?: "profile" | "query" | "full"
+	/**
+	 * Memory persistence mode:
+	 * - "always": Automatically save conversations as memories
+	 * - "never": Only retrieve memories, don't store new ones
+	 */
 	addMemory?: "always" | "never"
+	/** Supermemory API key (falls back to SUPERMEMORY_API_KEY env var) */
 	apiKey?: string
+	/** Custom Supermemory API base URL */
 	baseUrl?: string
+	/**
+	 * Custom function to format memory data into the system prompt.
+	 * If not provided, uses the default "User Supermemories:" format.
+	 *
+	 * @example
+	 * ```typescript
+	 * promptTemplate: (data) => `
+	 * <memories>
+	 * Here is some information about your past conversations:
+	 * ${data.userMemories}
+	 * ${data.generalSearchMemories}
+	 * </memories>
+	 * `.trim()
+	 * ```
+	 */
+	promptTemplate?: PromptTemplate
 }
 
 /**
@@ -84,6 +116,7 @@ const wrapVercelLanguageModel = (
 		mode: options?.mode ?? "profile",
 		addMemory: options?.addMemory ?? "never",
 		baseUrl: options?.baseUrl,
+		promptTemplate: options?.promptTemplate,
 	})
 
 	const wrappedModel = {
@@ -183,4 +216,6 @@ const wrapVercelLanguageModel = (
 export {
 	wrapVercelLanguageModel as withSupermemory,
 	type WrapVercelLanguageModelOptions as WithSupermemoryOptions,
+	type PromptTemplate,
+	type MemoryPromptData,
 }
diff --git a/packages/tools/src/vercel/memory-prompt.ts b/packages/tools/src/vercel/memory-prompt.ts
index ee7f85a7..3dfc203f 100644
--- a/packages/tools/src/vercel/memory-prompt.ts
+++ b/packages/tools/src/vercel/memory-prompt.ts
@@ -6,6 +6,46 @@ import {
 	type ProfileStructure,
 } from "./util"
 
+/**
+ * Data provided to the prompt template function for customizing memory injection.
+ */
+export interface MemoryPromptData {
+	/**
+	 * Pre-formatted markdown combining static and dynamic profile memories.
+	 * Contains core user facts (name, preferences, goals) and recent context (projects, interests).
+	 */
+	userMemories: string
+	/**
+	 * Pre-formatted search results text for the current query.
+	 * Contains memories retrieved based on semantic similarity to the conversation.
+	 * Empty string if mode is "profile" only.
+	 */
+	generalSearchMemories: string
+}
+
+/**
+ * Function type for customizing the memory prompt injection.
+ * Return the full string to be injected into the system prompt.
+ *
+ * @example
+ * ```typescript
+ * const promptTemplate: PromptTemplate = (data) => `
+ * <memories>
+ * Here is some information about your past conversations:
+ * ${data.userMemories}
+ * ${data.generalSearchMemories}
+ * </memories>
+ * `.trim()
+ * ```
+ */
+export type PromptTemplate = (data: MemoryPromptData) => string
+
+/**
+ * Default prompt template that replicates the original behavior.
+ */
+export const defaultPromptTemplate: PromptTemplate = (data) =>
+	`User Supermemories: \n${data.userMemories}\n${data.generalSearchMemories}`.trim()
+
 export const normalizeBaseUrl = (url?: string): string => {
 	const defaultUrl = "https://api.supermemory.ai"
 	if (!url) return defaultUrl
@@ -20,12 +60,12 @@ const supermemoryProfileSearch = async (
 ): Promise<ProfileStructure | null> => {
 	const payload = queryText
 		? JSON.stringify({
-			q: queryText,
-			containerTag: containerTag,
-		})
+				q: queryText,
+				containerTag: containerTag,
+			})
 		: JSON.stringify({
-			containerTag: containerTag,
-		})
+				containerTag: containerTag,
+			})
 
 	try {
 		const response = await fetch(`${baseUrl}/v4/profile`, {
@@ -60,6 +100,7 @@ export const addSystemPrompt = async (
 	mode: "profile" | "query" | "full",
 	baseUrl: string,
 	apiKey: string,
+	promptTemplate: PromptTemplate = defaultPromptTemplate,
 ): Promise<LanguageModelCallOptions> => {
 	const systemPromptExists = params.prompt.some(
 		(prompt) => prompt.role === "system",
 	)
 
 	const queryText =
 		mode !== "profile"
 			? params.prompt
-				.slice()
-				.reverse()
-				.find((prompt: { role: string }) => prompt.role === "user")
-				?.content?.filter(
-					(content: { type: string }) => content.type === "text",
-				)
-				?.map((content: { type: string; text: string }) =>
-					content.type === "text" ? content.text : "",
-				)
-				?.join(" ") || ""
+					.slice()
+					.reverse()
+					.find((prompt: { role: string }) => prompt.role === "user")
+					?.content?.filter(
+						(content: { type: string }) => content.type === "text",
+					)
+					?.map((content: { type: string; text: string }) =>
+						content.type === "text" ? content.text : "",
+					)
+					?.join(" ") || ""
 			: ""
 
 	const memoriesResponse = await supermemoryProfileSearch(
@@ -120,25 +161,29 @@
 		},
 	})
 
-	const profileData =
+	const userMemories =
 		mode !== "query"
 			? convertProfileToMarkdown({
-				profile: {
-					static: deduplicated.static,
-					dynamic: deduplicated.dynamic,
-				},
-				searchResults: { results: [] },
-			})
+					profile: {
+						static: deduplicated.static,
+						dynamic: deduplicated.dynamic,
+					},
+					searchResults: { results: [] },
+				})
 			: ""
 
-	const searchResultsMemories =
+	const generalSearchMemories =
 		mode !== "profile"
 			? `Search results for user's recent message: \n${deduplicated.searchResults
-				.map((memory) => `- ${memory}`)
-				.join("\n")}`
+					.map((memory) => `- ${memory}`)
+					.join("\n")}`
 			: ""
 
-	const memories =
-		`User Supermemories: \n${profileData}\n${searchResultsMemories}`.trim()
+	const promptData: MemoryPromptData = {
+		userMemories,
+		generalSearchMemories,
+	}
+
+	const memories = promptTemplate(promptData)
 
 	if (memories) {
 		logger.debug("Memory content preview", {
 			content: memories,
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 73adf200..43306934 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -11,7 +11,11 @@ import {
 	getLastUserMessage,
 	filterOutSupermemories,
 } from "./util"
-import { addSystemPrompt, normalizeBaseUrl } from "./memory-prompt"
+import {
+	addSystemPrompt,
+	normalizeBaseUrl,
+	type PromptTemplate,
+} from "./memory-prompt"
 
 export const getConversationContent = (params: LanguageModelCallOptions) => {
 	return params.prompt
@@ -39,24 +43,24 @@ export const convertToConversationMessages = (
 	const messages: ConversationMessage[] = []
 
 	for (const msg of params.prompt) {
+		if (msg.role === "system") {
+			continue
+		}
+
 		if (typeof msg.content === "string") {
-			const filteredContent = filterOutSupermemories(msg.content)
-			if (filteredContent) {
+			if (msg.content) {
 				messages.push({
-					role: msg.role as "user" | "assistant" | "system" | "tool",
-					content: filteredContent,
+					role: msg.role as "user" | "assistant" | "tool",
+					content: msg.content,
 				})
 			}
 		} else {
 			const contentParts = msg.content
 				.map((c) => {
-					if (c.type === "text") {
-						const filteredText = filterOutSupermemories(c.text)
-						if (filteredText) {
-							return {
-								type: "text" as const,
-								text: filteredText,
-							}
+					if (c.type === "text" && c.text) {
+						return {
+							type: "text" as const,
+							text: c.text,
 						}
 					}
 					if (
@@ -75,7 +79,7 @@
 
 		if (contentParts.length > 0) {
 			messages.push({
-				role: msg.role as "user" | "assistant" | "system" | "tool",
+				role: msg.role as "user" | "assistant" | "tool",
 				content: contentParts,
 			})
 		}
@@ -153,14 +157,35 @@
 	}
 }
 
+/**
+ * Configuration options for the Supermemory middleware.
+ */
 export interface SupermemoryMiddlewareOptions {
+	/** Container tag/identifier for memory search (e.g., user ID, project ID) */
 	containerTag: string
+	/** Supermemory API key */
 	apiKey: string
+	/** Optional conversation ID to group messages for contextual memory generation */
 	conversationId?: string
+	/** Enable detailed logging of memory search and injection */
 	verbose?: boolean
+	/**
+	 * Memory retrieval mode:
+	 * - "profile": Retrieves user profile memories (static + dynamic) without query filtering
+	 * - "query": Searches memories based on semantic similarity to the user's message
+	 * - "full": Combines both profile and query-based results
+	 */
 	mode?: "profile" | "query" | "full"
+	/**
+	 * Memory persistence mode:
+	 * - "always": Automatically save conversations as memories
+	 * - "never": Only retrieve memories, don't store new ones
+	 */
 	addMemory?: "always" | "never"
+	/** Custom Supermemory API base URL */
 	baseUrl?: string
+	/** Custom function to format memory data into the system prompt */
+	promptTemplate?: PromptTemplate
 }
 
 export interface SupermemoryMiddlewareContext {
@@ -172,6 +197,7 @@
 	addMemory: "always" | "never"
 	normalizedBaseUrl: string
 	apiKey: string
+	promptTemplate?: PromptTemplate
 }
 
 export const createSupermemoryContext = (
@@ -185,6 +211,7 @@
 		mode = "profile",
 		addMemory = "never",
 		baseUrl,
+		promptTemplate,
 	} = options
 
 	const logger = createLogger(verbose)
@@ -206,6 +233,7 @@
 		addMemory,
 		normalizedBaseUrl,
 		apiKey,
+		promptTemplate,
 	}
 }
 
@@ -235,6 +263,7 @@
 		ctx.mode,
 		ctx.normalizedBaseUrl,
 		ctx.apiKey,
+		ctx.promptTemplate,
 	)
 	return transformedParams
 }
diff --git a/packages/tools/src/vercel/util.ts b/packages/tools/src/vercel/util.ts
index 572f44a3..eec29859 100644
--- a/packages/tools/src/vercel/util.ts
+++ b/packages/tools/src/vercel/util.ts
@@ -21,22 +21,43 @@ export type LanguageModelStreamPart =
 	| LanguageModelV2StreamPart
 	| LanguageModelV3StreamPart
 
+/**
+ * Response structure from the Supermemory profile API.
+ */
 export interface ProfileStructure {
 	profile: {
+		/**
+		 * Core, stable facts about the user that rarely change.
+		 * Examples: name, profession, long-term preferences, goals.
+		 */
 		static?: Array<{ memory: string; metadata?: Record<string, unknown> }>
+		/**
+		 * Recently learned or frequently updated information about the user.
+		 * Examples: current projects, recent interests, ongoing topics.
+		 */
 		dynamic?: Array<{ memory: string; metadata?: Record<string, unknown> }>
 	}
 	searchResults: {
+		/**
+		 * Memories retrieved based on semantic similarity to the current query.
+		 * Most relevant to the immediate conversation context.
+		 */
 		results: Array<{ memory: string; metadata?: Record<string, unknown> }>
 	}
 }
 
+/**
+ * Simplified profile data for markdown conversion.
+ */
 export interface ProfileMarkdownData {
 	profile: {
+		/** Core, stable user facts (name, preferences, goals) */
 		static?: string[]
+		/** Recently learned or updated information (current projects, interests) */
 		dynamic?: string[]
 	}
 	searchResults: {
+		/** Query-relevant memories based on semantic similarity */
 		results: Array<{ memory: string }>
 	}
}
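Usage note: a minimal sketch of a custom `promptTemplate` that only emits non-empty sections, since per `MemoryPromptData` the `generalSearchMemories` field is an empty string in `"profile"` mode. It assumes the `@supermemory/tools/ai-sdk` entry point shown in the README above; the XML tag names (`user_profile`, `relevant_memories`) are illustrative, not part of the package.

```typescript
import { openai } from "@ai-sdk/openai"
import { withSupermemory, type PromptTemplate } from "@supermemory/tools/ai-sdk"

// Build the injected prompt from only the sections that have content,
// so no empty tags are injected when one memory source returns nothing.
const sectionedTemplate: PromptTemplate = (data) =>
	[
		data.userMemories && `<user_profile>\n${data.userMemories}\n</user_profile>`,
		data.generalSearchMemories &&
			`<relevant_memories>\n${data.generalSearchMemories}\n</relevant_memories>`,
	]
		.filter(Boolean)
		.join("\n")

const model = withSupermemory(openai("gpt-4"), "user-123", {
	mode: "full",
	promptTemplate: sectionedTemplate,
})
```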