Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .changeset/olive-waves-type.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
'@openai/agents-openai': patch
'@openai/agents-core': patch
---

feat: Add reasoning.effort: none parameter for gpt-5.1
50 changes: 50 additions & 0 deletions examples/basic/hello-world-gpt-5.1.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import {
Agent,
OpenAIChatCompletionsModel,
run,
withTrace,
} from '@openai/agents';
import OpenAI from 'openai';
import { z } from 'zod';

// Structured-output schema: both agents below return a { title, description }
// object (via `outputType: output`) instead of free-form text.
const output = z.object({
  title: z.string(),
  description: z.string(),
});

async function main() {
withTrace('GPT-5.1 None Reasoning Assistant', async () => {
const prompt =
'Tell me about recursion in programming in a few sentences. Quickly responding with a single answer is fine.';
const agent = new Agent({
name: 'GPT-5.1 Responses Assistant',
model: 'gpt-5.1',
instructions: "You're a helpful assistant.",
modelSettings: {
reasoning: { effort: 'none' },
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The default effort is none so are you adding it here just to demo?

text: { verbosity: 'low' },
},
outputType: output,
});
const result = await run(agent, prompt);
console.log(result.finalOutput);

const completionsAgent = new Agent({
name: 'GPT-5.1 Chat Completions Assistant',
model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5.1'),
instructions: "You're a helpful assistant.",
modelSettings: {
reasoning: { effort: 'none' },
text: { verbosity: 'low' },
},
outputType: output,
});
const completionsResult = await run(completionsAgent, prompt);
console.log(completionsResult.finalOutput);
});
}

// Entry point: report any failure and exit non-zero.
void main().catch((err) => {
  console.error(err);
  process.exit(1);
});
105 changes: 56 additions & 49 deletions examples/basic/hello-world-gpt-5.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
import { Agent, OpenAIChatCompletionsModel, run } from '@openai/agents';
import {
Agent,
OpenAIChatCompletionsModel,
run,
withTrace,
} from '@openai/agents';
import OpenAI from 'openai';
import { z } from 'zod';

Expand All @@ -8,59 +13,61 @@ const output = z.object({
});

async function main() {
const prompt =
'Tell me about recursion in programming. Quickly responding with a single answer is fine.';
withTrace('GPT-5 Assistant', async () => {
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

non-essential changes in this existing example: just added withTrace and adjusted the prompt

const prompt =
'Tell me about recursion in programming in a few sentences. Quickly responding with a single answer is fine.';

const agent = new Agent({
name: 'GPT-5 Assistant',
model: 'gpt-5',
instructions: "You're a helpful assistant.",
modelSettings: {
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
outputType: output,
});
const agent = new Agent({
name: 'GPT-5 Assistant',
model: 'gpt-5',
instructions: "You're a helpful assistant.",
modelSettings: {
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
outputType: output,
});

const result = await run(agent, prompt);
console.log(result.finalOutput);
const result = await run(agent, prompt);
console.log(result.finalOutput);

// The following code works in the same way:
// const agent2 = agent.clone({
// modelSettings: {
// providerData: {
// reasoning: { effort: 'minimal' },
// text: { verbosity: 'low' },
// }
// },
// });
// const result2 = await run(agent2, prompt);
// console.log(result2.finalOutput);
// The following code works in the same way:
// const agent2 = agent.clone({
// modelSettings: {
// providerData: {
// reasoning: { effort: 'minimal' },
// text: { verbosity: 'low' },
// }
// },
// });
// const result2 = await run(agent2, prompt);
// console.log(result2.finalOutput);

const completionsAgent = new Agent({
name: 'GPT-5 Assistant',
model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5'),
instructions: "You're a helpful assistant.",
modelSettings: {
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
outputType: output,
});
const completionsResult = await run(completionsAgent, prompt);
console.log(completionsResult.finalOutput);
const completionsAgent = new Agent({
name: 'GPT-5 Assistant',
model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5'),
instructions: "You're a helpful assistant.",
modelSettings: {
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
outputType: output,
});
const completionsResult = await run(completionsAgent, prompt);
console.log(completionsResult.finalOutput);

// The following code works in the same way:
// const completionsAgent2 = completionsAgent.clone({
// modelSettings: {
// providerData: {
// reasoning_effort: 'minimal',
// verbosity: 'low',
// }
// },
// });
// const completionsResult2 = await run(completionsAgent2, prompt);
// console.log(completionsResult2.finalOutput);
// The following code works in the same way:
// const completionsAgent2 = completionsAgent.clone({
// modelSettings: {
// providerData: {
// reasoning_effort: 'minimal',
// verbosity: 'low',
// }
// },
// });
// const completionsResult2 = await run(completionsAgent2, prompt);
// console.log(completionsResult2.finalOutput);
});
}

main().catch((error) => {
Expand Down
1 change: 1 addition & 0 deletions examples/basic/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
"start:dynamic-system-prompt": "tsx dynamic-system-prompt.ts",
"start:hello-world": "tsx hello-world.ts",
"start:hello-world-gpt-5": "tsx hello-world-gpt-5.ts",
"start:hello-world-gpt-5.1": "tsx hello-world-gpt-5.1.ts",
"start:hello-world-gpt-oss": "tsx hello-world-gpt-oss.ts",
"start:lifecycle-example": "tsx lifecycle-example.ts",
"start:local-image": "tsx local-image.ts",
Expand Down
3 changes: 2 additions & 1 deletion packages/agents-core/src/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@ export type ModelSettingsToolChoice =
* Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
*/
export type ModelSettingsReasoningEffort =
| 'minimal'
| 'none' // for gpt-5.1
| 'minimal' // for gpt-5
| 'low'
| 'medium'
| 'high'
Expand Down
29 changes: 29 additions & 0 deletions packages/agents-openai/test/openaiChatCompletionsModel.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,35 @@ describe('OpenAIChatCompletionsModel', () => {
expect(options).toEqual({ headers: HEADERS, signal: undefined });
});

it('passes none reasoning effort through to chat completions payloads', async () => {
  // Stub the client so we can inspect the payload sent to the API.
  const client = new FakeClient();
  client.chat.completions.create.mockResolvedValue({
    id: 'gpt-5.1-response',
    choices: [{ message: { content: 'done' } }],
    usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
  } as any);

  const model = new OpenAIChatCompletionsModel(client as any, 'gpt-5.1');
  const request: any = {
    input: 'prompt',
    modelSettings: {
      reasoning: { effort: 'none' },
    },
    tools: [],
    outputType: 'text',
    handoffs: [],
    tracing: false,
  };

  await withTrace('gpt-5.1 none', () => model.getResponse(request));

  // The 'none' effort must be forwarded as the flat `reasoning_effort` field.
  expect(client.chat.completions.create).toHaveBeenCalledTimes(1);
  const [payload, callOptions] = client.chat.completions.create.mock.calls[0];
  expect(payload.reasoning_effort).toBe('none');
  expect(callOptions).toEqual({ headers: HEADERS, signal: undefined });
});

it('handles function tool calls', async () => {
const client = new FakeClient();
const response = {
Expand Down
37 changes: 37 additions & 0 deletions packages/agents-openai/test/openaiResponsesModel.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,43 @@ describe('OpenAIResponsesModel', () => {
});
});

it('passes none reasoning effort to the Responses API payload', async () => {
  await withTrace('test', async () => {
    // Minimal fake client exposing only responses.create.
    const createMock = vi.fn().mockResolvedValue({
      id: 'res-none',
      usage: {
        input_tokens: 1,
        output_tokens: 1,
        total_tokens: 2,
      },
      output: [],
    });
    const fakeClient = {
      responses: { create: createMock },
    } as unknown as OpenAI;

    const model = new OpenAIResponsesModel(fakeClient, 'gpt-5.1');
    await model.getResponse({
      systemInstructions: undefined,
      input: 'hi',
      modelSettings: {
        reasoning: { effort: 'none' },
      },
      tools: [],
      outputType: 'text',
      handoffs: [],
      tracing: false,
      signal: undefined,
    } as any);

    // The Responses API keeps reasoning settings as a nested object.
    expect(createMock).toHaveBeenCalledTimes(1);
    const [payload] = createMock.mock.calls[0];
    expect(payload.reasoning).toEqual({ effort: 'none' });
  });
});

it('getStreamedResponse yields events and calls client with stream flag', async () => {
await withTrace('test', async () => {
const fakeResponse = { id: 'res2', usage: {}, output: [] };
Expand Down