Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pipeline/preprocessors/link_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -386,6 +386,7 @@ class LinkMap(TypedDict):
"ClearToolUsesEdit": "classes/langchain.index.ClearToolUsesEdit.html",
"ContextEdit": "interfaces/langchain.index.ContextEdit.html",
"toolRetryMiddleware": "functions/langchain.index.toolRetryMiddleware.html",
"openAIModerationMiddleware": "functions/langchain.index.openAIModerationMiddleware.html",
},
},
]
Expand Down
198 changes: 198 additions & 0 deletions src/oss/langchain/middleware/built-in.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -2092,9 +2092,25 @@ const agent = createAgent({
:::

:::js
<ParamField body="enableCaching" type="boolean" default="true">
Whether to enable prompt caching. Can be overridden at runtime via `middleware_context`.
</ParamField>

<ParamField body="ttl" type="string" default="5m">
  Time to live for cached content. Valid values: `'5m'` or `'1h'`.
</ParamField>

<ParamField body="minMessagesToCache" type="number" default="3">
Minimum number of messages required before caching is applied. Caching is skipped if the total message count (including system prompt) is below this threshold.
</ParamField>

<ParamField body="unsupportedModelBehavior" type="string" default="warn">
Behavior when using non-Anthropic models. Options:

- `'ignore'` - Ignore the unsupported model and continue without caching
- `'warn'` (default) - Warn the user and continue without caching
- `'raise'` - Throw an error and stop execution
</ParamField>
:::

</Accordion>
Expand Down Expand Up @@ -2147,6 +2163,7 @@ Please be a helpful assistant.
<Lots more context ...>
`;

// Basic usage with default settings
const agent = createAgent({
model: "claude-sonnet-4-5-20250929",
prompt: LONG_PROMPT,
Expand All @@ -2163,6 +2180,40 @@ await agent.invoke({
const result = await agent.invoke({
messages: [new HumanMessage("What's my name?")]
});

// Custom configuration for longer conversations
const cachingMiddleware = anthropicPromptCachingMiddleware({
ttl: "1h", // Cache for 1 hour instead of default 5 minutes
minMessagesToCache: 5 // Only cache after 5 messages
});

const agentWithCustomConfig = createAgent({
model: "anthropic:claude-3-5-sonnet",
prompt: LONG_PROMPT,
middleware: [cachingMiddleware],
});

// Conditional caching based on runtime context
const conditionalAgent = createAgent({
model: "anthropic:claude-3-5-sonnet",
prompt: LONG_PROMPT,
middleware: [
anthropicPromptCachingMiddleware({
enableCaching: true,
ttl: "5m"
})
],
});

// Disable caching for specific requests
await conditionalAgent.invoke(
{ messages: [new HumanMessage("Process this without caching")] },
{
configurable: {
middleware_context: { enableCaching: false }
}
}
);
```
:::

Expand Down Expand Up @@ -2579,6 +2630,27 @@ agent = create_agent(
```
:::

:::js
**API reference:** @[`openAIModerationMiddleware`]

```typescript
import { createAgent, openAIModerationMiddleware } from "langchain";

// Screen both the user's input and the model's output,
// ending the run immediately when content is flagged.
const moderation = openAIModerationMiddleware({
  model: "gpt-4o", // Required: OpenAI model for moderation
  checkInput: true,
  checkOutput: true,
  exitBehavior: "end",
});

const agent = createAgent({
  model: "gpt-4o",
  tools: [searchTool, databaseTool],
  middleware: [moderation],
});
```
:::

<Accordion title="Configuration options">

:::python
Expand Down Expand Up @@ -2625,6 +2697,46 @@ agent = create_agent(
</ParamField>
:::

:::js
<ParamField body="model" type="string | BaseChatModel" required>
OpenAI model to use for moderation. Can be either a model identifier string (e.g., `'openai:gpt-4o'`) or a `BaseChatModel` instance. The model must be an OpenAI model that supports moderation.
</ParamField>

<ParamField body="moderationModel" type="string" default="omni-moderation-latest">
  Moderation model to use. Options: `'omni-moderation-latest'`, `'omni-moderation-2024-09-26'`, `'text-moderation-latest'`, or `'text-moderation-stable'`.
</ParamField>

<ParamField body="checkInput" type="boolean" default="true">
  Whether to check user input messages before the model is called.
</ParamField>

<ParamField body="checkOutput" type="boolean" default="true">
  Whether to check model output messages after the model is called.
</ParamField>

<ParamField body="checkToolResults" type="boolean" default="false">
  Whether to check tool result messages before the model is called.
</ParamField>

<ParamField body="exitBehavior" type="string" default="end">
How to handle violations when content is flagged. Options:

- `'end'` (default) - End agent execution immediately with a violation message
- `'error'` - Throw `OpenAIModerationError` exception
- `'replace'` - Replace the flagged content with the violation message and continue
</ParamField>

<ParamField body="violationMessage" type="string">
Custom template for violation messages. Supports template variables:

- `{categories}` - Comma-separated list of flagged categories
- `{category_scores}` - JSON string of category scores
- `{original_content}` - The original flagged content

Default: `"I'm sorry, but I can't comply with that request. It was flagged for {categories}."`
</ParamField>
:::

</Accordion>

<Accordion title="Full example">
Expand Down Expand Up @@ -2695,4 +2807,90 @@ agent_replace = create_agent(
```
:::

:::js
```typescript
import { createAgent, openAIModerationMiddleware } from "langchain";
import { ChatOpenAI } from "@langchain/openai";

// Model identifier shared by the agent and the moderation middleware.
// Declared once — redeclaring `const model` per example is a SyntaxError.
const model = "gpt-4o";

// Basic moderation using a model string
const agent = createAgent({
  model,
  tools: [searchTool, customerDataTool],
  middleware: [
    openAIModerationMiddleware({
      model,
      checkInput: true,
      checkOutput: true,
    }),
  ],
});

// Using a model instance instead of a string
const moderationModel = new ChatOpenAI({ model: "gpt-4o-mini" });

const agentWithModelInstance = createAgent({
  model,
  tools: [searchTool, customerDataTool],
  middleware: [
    openAIModerationMiddleware({
      model: moderationModel,
      checkInput: true,
      checkOutput: true,
    }),
  ],
});

// Strict moderation: also screen tool results, and throw on violations
const agentStrict = createAgent({
  model,
  tools: [searchTool, customerDataTool],
  middleware: [
    openAIModerationMiddleware({
      model,
      moderationModel: "omni-moderation-latest",
      checkInput: true,
      checkOutput: true,
      checkToolResults: true,
      exitBehavior: "error",
      violationMessage: "Content policy violation detected: {categories}. Please rephrase your request.",
    }),
  ],
});

// Moderation with replacement behavior: swap flagged content and continue
const agentReplace = createAgent({
  model,
  tools: [searchTool],
  middleware: [
    openAIModerationMiddleware({
      model,
      checkInput: true,
      exitBehavior: "replace",
      violationMessage: "[Content removed due to safety policies]",
    }),
  ],
});

// Custom violation message using all template variables
const agentCustomMessage = createAgent({
  model,
  tools: [searchTool],
  middleware: [
    openAIModerationMiddleware({
      model,
      checkInput: true,
      checkOutput: true,
      violationMessage: "Flagged categories: {categories}. Scores: {category_scores}. Original: {original_content}",
    }),
  ],
});
```
:::

</Accordion>
Loading