Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion apps/docs/docs.json
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@
},
{
"group": "Supermemory MCP",
"pages": ["mcp", "supermemory-mcp/setup"]
"pages": ["supermemory-mcp/mcp", "supermemory-mcp/setup"]
},
{
"group": "Migration Guides",
Expand Down
2 changes: 1 addition & 1 deletion apps/docs/supermemory-mcp/introduction.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ description: 'Give your AI assistants persistent memory with the Model Context P

Supermemory MCP Server 4.0 is a lightweight component that gives AI assistants persistent memory across conversations. It serves as a universal memory layer enabling Large Language Models (LLMs) to maintain context and memories across different applications and sessions, solving the fundamental limitation of AI assistants forgetting everything between conversations.

<Card title="Quick Start" icon="rocket" href="/mcp">
<Card title="Quick Start" icon="rocket" href="/supermemory-mcp/mcp">
Jump to installation and setup
</Card>

Expand Down
File renamed without changes.
35 changes: 35 additions & 0 deletions packages/tools/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,41 @@ const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", {
})
```

#### Custom Prompt Templates

Customize how memories are formatted and injected into the system prompt using the `promptTemplate` option. This is useful for:
- Using XML-based prompting (e.g., for Claude models)
- Custom branding (removing "supermemories" references)
- Controlling how your agent describes where information comes from

```typescript
import { generateText } from "ai"
import { withSupermemory, type MemoryPromptData } from "@supermemory/tools/ai-sdk"
import { openai } from "@ai-sdk/openai"

const customPrompt = (data: MemoryPromptData) => `
<user_memories>
Here is some information about your past conversations with the user:
${data.userMemories}
${data.generalSearchMemories}
</user_memories>
`.trim()

const modelWithCustomPrompt = withSupermemory(openai("gpt-4"), "user-123", {
mode: "full",
promptTemplate: customPrompt,
})

const result = await generateText({
model: modelWithCustomPrompt,
messages: [{ role: "user", content: "What do you know about me?" }],
})
```

The `MemoryPromptData` object provides:
- `userMemories`: Pre-formatted markdown combining static profile facts (name, preferences, goals) and dynamic context (current projects, recent interests)
- `generalSearchMemories`: Pre-formatted search results based on semantic similarity to the current query (empty string when `mode` is `"profile"`)

### OpenAI SDK Usage

#### OpenAI Middleware with Supermemory
Expand Down
2 changes: 1 addition & 1 deletion packages/tools/package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
"version": "1.3.65",
"version": "1.3.66",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
Expand Down
35 changes: 35 additions & 0 deletions packages/tools/src/vercel/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,46 @@ import {
extractAssistantResponseText,
saveMemoryAfterResponse,
} from "./middleware"
import type { PromptTemplate, MemoryPromptData } from "./memory-prompt"

/**
 * Options accepted when wrapping a Vercel AI SDK language model with
 * Supermemory-backed memory retrieval and persistence.
 */
interface WrapVercelLanguageModelOptions {
	/** Optional conversation ID to group messages for contextual memory generation */
	conversationId?: string
	/** Enable detailed logging of memory search and injection */
	verbose?: boolean
	/**
	 * Memory retrieval mode (defaults to "profile" when omitted):
	 * - "profile": Retrieves user profile memories (static + dynamic) without query filtering
	 * - "query": Searches memories based on semantic similarity to the user's message
	 * - "full": Combines both profile and query-based results
	 */
	mode?: "profile" | "query" | "full"
	/**
	 * Memory persistence mode (defaults to "never" when omitted):
	 * - "always": Automatically save conversations as memories
	 * - "never": Only retrieve memories, don't store new ones
	 */
	addMemory?: "always" | "never"
	/** Supermemory API key (falls back to SUPERMEMORY_API_KEY env var) */
	apiKey?: string
	/** Custom Supermemory API base URL */
	baseUrl?: string
	/**
	 * Custom function to format memory data into the system prompt.
	 * If not provided, uses the default "User Supermemories:" format.
	 *
	 * @example
	 * ```typescript
	 * promptTemplate: (data) => `
	 * <user_memories>
	 * Here is some information about your past conversations:
	 * ${data.userMemories}
	 * ${data.generalSearchMemories}
	 * </user_memories>
	 * `.trim()
	 * ```
	 */
	promptTemplate?: PromptTemplate
}

/**
Expand Down Expand Up @@ -84,6 +116,7 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
mode: options?.mode ?? "profile",
addMemory: options?.addMemory ?? "never",
baseUrl: options?.baseUrl,
promptTemplate: options?.promptTemplate,
})

const wrappedModel = {
Expand Down Expand Up @@ -183,4 +216,6 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
export {
wrapVercelLanguageModel as withSupermemory,
type WrapVercelLanguageModelOptions as WithSupermemoryOptions,
type PromptTemplate,
type MemoryPromptData,
}
99 changes: 72 additions & 27 deletions packages/tools/src/vercel/memory-prompt.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,46 @@ import {
type ProfileStructure,
} from "./util"

/**
 * Shape of the data handed to a prompt-template function when the
 * middleware builds the memory section of the system prompt.
 */
export interface MemoryPromptData {
	/**
	 * Pre-formatted markdown that merges the static and dynamic profile
	 * memories: core user facts (name, preferences, goals) plus recent
	 * context (projects, interests).
	 */
	userMemories: string
	/**
	 * Pre-formatted text of search hits for the current query, retrieved by
	 * semantic similarity to the conversation. Empty string when the mode is
	 * "profile" only.
	 */
	generalSearchMemories: string
}

/**
 * Signature for a custom memory-prompt formatter. The returned string is
 * injected verbatim into the system prompt.
 *
 * @example
 * ```typescript
 * const promptTemplate: PromptTemplate = (data) => `
 * <user_memories>
 * Here is some information about your past conversations:
 * ${data.userMemories}
 * ${data.generalSearchMemories}
 * </user_memories>
 * `.trim()
 * ```
 */
export type PromptTemplate = (data: MemoryPromptData) => string

/**
 * Built-in template preserving the historical "User Supermemories:" layout.
 */
export const defaultPromptTemplate: PromptTemplate = ({
	userMemories,
	generalSearchMemories,
}) => {
	// Header line, then each memory section on its own line; trim so that
	// empty sections don't leave trailing whitespace.
	const assembled = `User Supermemories: \n${userMemories}\n${generalSearchMemories}`
	return assembled.trim()
}

export const normalizeBaseUrl = (url?: string): string => {
const defaultUrl = "https://api.supermemory.ai"
if (!url) return defaultUrl
Expand All @@ -20,12 +60,12 @@ const supermemoryProfileSearch = async (
): Promise<ProfileStructure> => {
const payload = queryText
? JSON.stringify({
q: queryText,
containerTag: containerTag,
})
q: queryText,
containerTag: containerTag,
})
: JSON.stringify({
containerTag: containerTag,
})
containerTag: containerTag,
})

try {
const response = await fetch(`${baseUrl}/v4/profile`, {
Expand Down Expand Up @@ -60,6 +100,7 @@ export const addSystemPrompt = async (
mode: "profile" | "query" | "full",
baseUrl: string,
apiKey: string,
promptTemplate: PromptTemplate = defaultPromptTemplate,
): Promise<LanguageModelCallOptions> => {
const systemPromptExists = params.prompt.some(
(prompt) => prompt.role === "system",
Expand All @@ -68,16 +109,16 @@ export const addSystemPrompt = async (
const queryText =
mode !== "profile"
? params.prompt
.slice()
.reverse()
.find((prompt: { role: string }) => prompt.role === "user")
?.content?.filter(
(content: { type: string }) => content.type === "text",
)
?.map((content: { type: string; text: string }) =>
content.type === "text" ? content.text : "",
)
?.join(" ") || ""
.slice()
.reverse()
.find((prompt: { role: string }) => prompt.role === "user")
?.content?.filter(
(content: { type: string }) => content.type === "text",
)
?.map((content: { type: string; text: string }) =>
content.type === "text" ? content.text : "",
)
?.join(" ") || ""
: ""

const memoriesResponse = await supermemoryProfileSearch(
Expand Down Expand Up @@ -120,25 +161,29 @@ export const addSystemPrompt = async (
},
})

const profileData =
const userMemories =
mode !== "query"
? convertProfileToMarkdown({
profile: {
static: deduplicated.static,
dynamic: deduplicated.dynamic,
},
searchResults: { results: [] },
})
profile: {
static: deduplicated.static,
dynamic: deduplicated.dynamic,
},
searchResults: { results: [] },
})
: ""
const searchResultsMemories =
const generalSearchMemories =
mode !== "profile"
? `Search results for user's recent message: \n${deduplicated.searchResults
.map((memory) => `- ${memory}`)
.join("\n")}`
.map((memory) => `- ${memory}`)
.join("\n")}`
: ""

const memories =
`User Supermemories: \n${profileData}\n${searchResultsMemories}`.trim()
const promptData: MemoryPromptData = {
userMemories,
generalSearchMemories,
}

const memories = promptTemplate(promptData)
if (memories) {
logger.debug("Memory content preview", {
content: memories,
Expand Down
55 changes: 42 additions & 13 deletions packages/tools/src/vercel/middleware.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,11 @@ import {
getLastUserMessage,
filterOutSupermemories,
} from "./util"
import { addSystemPrompt, normalizeBaseUrl } from "./memory-prompt"
import {
addSystemPrompt,
normalizeBaseUrl,
type PromptTemplate,
} from "./memory-prompt"

export const getConversationContent = (params: LanguageModelCallOptions) => {
return params.prompt
Expand Down Expand Up @@ -39,24 +43,24 @@ export const convertToConversationMessages = (
const messages: ConversationMessage[] = []

for (const msg of params.prompt) {
if (msg.role === "system") {
continue
}

if (typeof msg.content === "string") {
const filteredContent = filterOutSupermemories(msg.content)
if (filteredContent) {
if (msg.content) {
messages.push({
role: msg.role as "user" | "assistant" | "system" | "tool",
content: filteredContent,
role: msg.role as "user" | "assistant" | "tool",
content: msg.content,
})
}
} else {
const contentParts = msg.content
.map((c) => {
if (c.type === "text") {
const filteredText = filterOutSupermemories(c.text)
if (filteredText) {
return {
type: "text" as const,
text: filteredText,
}
if (c.type === "text" && c.text) {
return {
type: "text" as const,
text: c.text,
}
}
if (
Expand All @@ -75,7 +79,7 @@ export const convertToConversationMessages = (

if (contentParts.length > 0) {
messages.push({
role: msg.role as "user" | "assistant" | "system" | "tool",
role: msg.role as "user" | "assistant" | "tool",
content: contentParts,
})
}
Expand Down Expand Up @@ -153,14 +157,35 @@ export const saveMemoryAfterResponse = async (
}
}

/**
 * Configuration options for the Supermemory middleware.
 */
export interface SupermemoryMiddlewareOptions {
	/** Container tag/identifier for memory search (e.g., user ID, project ID) */
	containerTag: string
	/** Supermemory API key */
	apiKey: string
	/** Optional conversation ID to group messages for contextual memory generation */
	conversationId?: string
	/** Enable detailed logging of memory search and injection */
	verbose?: boolean
	/**
	 * Memory retrieval mode (defaults to "profile" when omitted):
	 * - "profile": Retrieves user profile memories (static + dynamic) without query filtering
	 * - "query": Searches memories based on semantic similarity to the user's message
	 * - "full": Combines both profile and query-based results
	 */
	mode?: "profile" | "query" | "full"
	/**
	 * Memory persistence mode (defaults to "never" when omitted):
	 * - "always": Automatically save conversations as memories
	 * - "never": Only retrieve memories, don't store new ones
	 */
	addMemory?: "always" | "never"
	/** Custom Supermemory API base URL (a default endpoint is used when omitted) */
	baseUrl?: string
	/** Custom function to format memory data into the system prompt; falls back to the default "User Supermemories:" format */
	promptTemplate?: PromptTemplate
}

export interface SupermemoryMiddlewareContext {
Expand All @@ -172,6 +197,7 @@ export interface SupermemoryMiddlewareContext {
addMemory: "always" | "never"
normalizedBaseUrl: string
apiKey: string
promptTemplate?: PromptTemplate
}

export const createSupermemoryContext = (
Expand All @@ -185,6 +211,7 @@ export const createSupermemoryContext = (
mode = "profile",
addMemory = "never",
baseUrl,
promptTemplate,
} = options

const logger = createLogger(verbose)
Expand All @@ -206,6 +233,7 @@ export const createSupermemoryContext = (
addMemory,
normalizedBaseUrl,
apiKey,
promptTemplate,
}
}

Expand Down Expand Up @@ -235,6 +263,7 @@ export const transformParamsWithMemory = async (
ctx.mode,
ctx.normalizedBaseUrl,
ctx.apiKey,
ctx.promptTemplate,
)
return transformedParams
}
Expand Down
Loading
Loading