Skip to content

Commit c6161a4

Browse files
committed
fix: improve the compatibility for conversationId / previousResponseId + tool calls
ref: openai/openai-agents-python#1827
1 parent 42c2e47 commit c6161a4

File tree

7 files changed

+662
-15
lines changed

7 files changed

+662
-15
lines changed

.changeset/heavy-foxes-sit.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
---
2+
'@openai/agents-core': patch
3+
---
4+
5+
fix: improve the compatibility for conversationId / previousResponseId + tool calls
6+
7+
ref: https://github.com/openai/openai-agents-python/pull/1827

docs/src/content/docs/guides/running-agents.mdx

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ import helloWorldWithRunnerExample from '../../../../../examples/docs/hello-worl
88
import helloWorldExample from '../../../../../examples/docs/hello-world.ts?raw';
99
import runningAgentsExceptionExample from '../../../../../examples/docs/running-agents/exceptions1.ts?raw';
1010
import chatLoopExample from '../../../../../examples/docs/running-agents/chatLoop.ts?raw';
11+
import conversationIdExample from '../../../../../examples/docs/running-agents/conversationId.ts?raw';
12+
import previousResponseIdExample from '../../../../../examples/docs/running-agents/previousResponseId.ts?raw';
1113

1214
Agents do nothing by themselves – you **run** them with the `Runner` class or the `run()` utility.
1315

@@ -95,6 +97,32 @@ Each call to `runner.run()` (or `run()` utility) represents one **turn** in your
9597

9698
See [the chat example](https://github.com/openai/openai-agents-js/tree/main/examples/basic/chat.ts) for an interactive version.
9799

100+
### Server-managed conversations
101+
102+
You can let the OpenAI Responses API persist conversation history for you instead of sending your entire local transcript on every turn. This is useful when you are coordinating long conversations or multiple services. See the [Conversation state guide](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses) for details.
103+
104+
OpenAI exposes two ways to reuse server-side state:
105+
106+
#### 1. `conversationId` for an entire conversation
107+
108+
You can create a conversation once using the [Conversations API](https://platform.openai.com/docs/api-reference/conversations/create) and then reuse its ID for every turn. The SDK automatically includes only the newly generated items.
109+
110+
<Code
111+
lang="typescript"
112+
code={conversationIdExample}
113+
title="Reusing a server conversation"
114+
/>
115+
116+
#### 2. `previousResponseId` to continue from the last turn
117+
118+
If you prefer to use only the Responses API, you can chain each request using the ID returned from the previous response. This keeps the context alive across turns without creating a full conversation resource.
119+
120+
<Code
121+
lang="typescript"
122+
code={previousResponseIdExample}
123+
title="Chaining with previousResponseId"
124+
/>
125+
98126
## Exceptions
99127

100128
The SDK throws a small set of errors you can catch:
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
import { Agent, run } from '@openai/agents';
2+
import { OpenAI } from 'openai';
3+
4+
const agent = new Agent({
5+
name: 'Assistant',
6+
instructions: 'Reply very concisely.',
7+
});
8+
9+
async function main() {
10+
// Create a server-managed conversation:
11+
const client = new OpenAI();
12+
const { id: conversationId } = await client.conversations.create({});
13+
14+
const first = await run(agent, 'What city is the Golden Gate Bridge in?', {
15+
conversationId,
16+
});
17+
console.log(first.finalOutput);
18+
// -> "San Francisco"
19+
20+
const second = await run(agent, 'What state is it in?', { conversationId });
21+
console.log(second.finalOutput);
22+
// -> "California"
23+
}
24+
25+
main().catch(console.error);
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
import { Agent, run } from '@openai/agents';
2+
3+
const agent = new Agent({
4+
name: 'Assistant',
5+
instructions: 'Reply very concisely.',
6+
});
7+
8+
async function main() {
9+
const first = await run(agent, 'What city is the Golden Gate Bridge in?');
10+
console.log(first.finalOutput);
11+
// -> "San Francisco"
12+
13+
const previousResponseId = first.lastResponseId;
14+
const second = await run(agent, 'What state is it in?', {
15+
previousResponseId,
16+
});
17+
console.log(second.finalOutput);
18+
// -> "California"
19+
}
20+
21+
main().catch(console.error);

packages/agents-core/src/run.ts

Lines changed: 151 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -178,19 +178,114 @@ export function getTracing(
178178
return 'enabled_without_data';
179179
}
180180

181+
function toAgentInputList(
182+
originalInput: string | AgentInputItem[],
183+
): AgentInputItem[] {
184+
if (typeof originalInput === 'string') {
185+
return [{ type: 'message', role: 'user', content: originalInput }];
186+
}
187+
188+
return [...originalInput];
189+
}
190+
191+
/**
192+
* Internal module for tracking the items in turns and ensuring that we don't send duplicate items.
193+
* This logic is vital for properly handling the items to send during multiple turns
194+
* when you use either `conversationId` or `previousResponseId`.
195+
 * Both scenarios expect an agent loop to send only the new items for each Responses API call.
196+
*
197+
* see also: https://platform.openai.com/docs/guides/conversation-state?api-mode=responses
198+
*/
199+
class ServerConversationTracker {
200+
// Conversation ID:
201+
// - https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#using-the-conversations-api
202+
// - https://platform.openai.com/docs/api-reference/conversations/create
203+
public conversationId?: string;
204+
205+
// Previous Response ID:
206+
// https://platform.openai.com/docs/guides/conversation-state?api-mode=responses#passing-context-from-the-previous-response
207+
public previousResponseId?: string;
208+
209+
// Using this flag because WeakSet does not provide a way to check its size
210+
private sentInitialInput = false;
211+
// The items already sent to the model; using WeakSet for memory efficiency
212+
private sentItems = new WeakSet<object>();
213+
// The items received from the server; using WeakSet for memory efficiency
214+
private serverItems = new WeakSet<object>();
215+
216+
constructor({
217+
conversationId,
218+
previousResponseId,
219+
}: {
220+
conversationId?: string;
221+
previousResponseId?: string;
222+
}) {
223+
this.conversationId = conversationId ?? undefined;
224+
this.previousResponseId = previousResponseId ?? undefined;
225+
}
226+
227+
trackServerItems(modelResponse: ModelResponse | undefined) {
228+
if (!modelResponse) {
229+
return;
230+
}
231+
for (const item of modelResponse.output) {
232+
if (item && typeof item === 'object') {
233+
this.serverItems.add(item);
234+
}
235+
}
236+
if (
237+
!this.conversationId &&
238+
this.previousResponseId !== undefined &&
239+
modelResponse.responseId
240+
) {
241+
this.previousResponseId = modelResponse.responseId;
242+
}
243+
}
244+
245+
prepareInput(
246+
originalInput: string | AgentInputItem[],
247+
generatedItems: RunItem[],
248+
): AgentInputItem[] {
249+
const inputItems: AgentInputItem[] = [];
250+
251+
if (!this.sentInitialInput) {
252+
const initialItems = toAgentInputList(originalInput);
253+
for (const item of initialItems) {
254+
inputItems.push(item);
255+
if (item && typeof item === 'object') {
256+
this.sentItems.add(item);
257+
}
258+
}
259+
this.sentInitialInput = true;
260+
}
261+
262+
for (const item of generatedItems) {
263+
if (item.type === 'tool_approval_item') {
264+
continue;
265+
}
266+
const rawItem = item.rawItem;
267+
if (!rawItem || typeof rawItem !== 'object') {
268+
continue;
269+
}
270+
if (this.sentItems.has(rawItem) || this.serverItems.has(rawItem)) {
271+
continue;
272+
}
273+
inputItems.push(rawItem as AgentInputItem);
274+
this.sentItems.add(rawItem);
275+
}
276+
277+
return inputItems;
278+
}
279+
}
280+
181281
export function getTurnInput(
182282
originalInput: string | AgentInputItem[],
183283
generatedItems: RunItem[],
184284
): AgentInputItem[] {
185285
const rawItems = generatedItems
186286
.filter((item) => item.type !== 'tool_approval_item') // don't include approval items to avoid double function calls
187287
.map((item) => item.rawItem);
188-
189-
if (typeof originalInput === 'string') {
190-
originalInput = [{ type: 'message', role: 'user', content: originalInput }];
191-
}
192-
193-
return [...originalInput, ...rawItems];
288+
return [...toAgentInputList(originalInput), ...rawItems];
194289
}
195290

196291
/**
@@ -254,6 +349,14 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
254349
options.maxTurns ?? DEFAULT_MAX_TURNS,
255350
);
256351

352+
const serverConversationTracker =
353+
options.conversationId || options.previousResponseId
354+
? new ServerConversationTracker({
355+
conversationId: options.conversationId,
356+
previousResponseId: options.previousResponseId,
357+
})
358+
: undefined;
359+
257360
try {
258361
while (true) {
259362
const explictlyModelSet =
@@ -355,10 +458,12 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
355458
await this.#runInputGuardrails(state);
356459
}
357460

358-
const turnInput = getTurnInput(
359-
state._originalInput,
360-
state._generatedItems,
361-
);
461+
const turnInput = serverConversationTracker
462+
? serverConversationTracker.prepareInput(
463+
state._originalInput,
464+
state._generatedItems,
465+
)
466+
: getTurnInput(state._originalInput, state._generatedItems);
362467

363468
if (state._noActiveAgentRun) {
364469
state._currentAgent.emit(
@@ -385,14 +490,21 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
385490
state._toolUseTracker,
386491
modelSettings,
387492
);
493+
const previousResponseId =
494+
serverConversationTracker?.previousResponseId ??
495+
options.previousResponseId;
496+
const conversationId =
497+
serverConversationTracker?.conversationId ??
498+
options.conversationId;
499+
388500
state._lastTurnResponse = await model.getResponse({
389501
systemInstructions: await state._currentAgent.getSystemPrompt(
390502
state._context,
391503
),
392504
prompt: await state._currentAgent.getPrompt(state._context),
393505
input: turnInput,
394-
previousResponseId: options.previousResponseId,
395-
conversationId: options.conversationId,
506+
previousResponseId,
507+
conversationId,
396508
modelSettings,
397509
tools: serializedTools,
398510
outputType: convertAgentOutputTypeToSerializable(
@@ -409,6 +521,10 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
409521
state._context.usage.add(state._lastTurnResponse.usage);
410522
state._noActiveAgentRun = false;
411523

524+
serverConversationTracker?.trackServerItems(
525+
state._lastTurnResponse,
526+
);
527+
412528
const processedResponse = processModelResponse(
413529
state._lastTurnResponse,
414530
state._currentAgent,
@@ -623,6 +739,14 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
623739
result: StreamedRunResult<TContext, TAgent>,
624740
options: StreamRunOptions<TContext>,
625741
): Promise<void> {
742+
const serverConversationTracker =
743+
options.conversationId || options.previousResponseId
744+
? new ServerConversationTracker({
745+
conversationId: options.conversationId,
746+
previousResponseId: options.previousResponseId,
747+
})
748+
: undefined;
749+
626750
try {
627751
while (true) {
628752
const currentAgent = result.state._currentAgent;
@@ -739,7 +863,12 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
739863
modelSettings,
740864
);
741865

742-
const turnInput = getTurnInput(result.input, result.newItems);
866+
const turnInput = serverConversationTracker
867+
? serverConversationTracker.prepareInput(
868+
result.input,
869+
result.newItems,
870+
)
871+
: getTurnInput(result.input, result.newItems);
743872

744873
if (result.state._noActiveAgentRun) {
745874
currentAgent.emit(
@@ -752,14 +881,20 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
752881

753882
let finalResponse: ModelResponse | undefined = undefined;
754883

884+
const previousResponseId =
885+
serverConversationTracker?.previousResponseId ??
886+
options.previousResponseId;
887+
const conversationId =
888+
serverConversationTracker?.conversationId ?? options.conversationId;
889+
755890
for await (const event of model.getStreamedResponse({
756891
systemInstructions: await currentAgent.getSystemPrompt(
757892
result.state._context,
758893
),
759894
prompt: await currentAgent.getPrompt(result.state._context),
760895
input: turnInput,
761-
previousResponseId: options.previousResponseId,
762-
conversationId: options.conversationId,
896+
previousResponseId,
897+
conversationId,
763898
modelSettings,
764899
tools: serializedTools,
765900
handoffs: serializedHandoffs,
@@ -798,6 +933,7 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
798933
}
799934

800935
result.state._lastTurnResponse = finalResponse;
936+
serverConversationTracker?.trackServerItems(finalResponse);
801937
result.state._modelResponses.push(result.state._lastTurnResponse);
802938

803939
const processedResponse = processModelResponse(

0 commit comments

Comments
 (0)