diff --git a/.changeset/olive-waves-type.md b/.changeset/olive-waves-type.md new file mode 100644 index 00000000..b0a201aa --- /dev/null +++ b/.changeset/olive-waves-type.md @@ -0,0 +1,6 @@ +--- +'@openai/agents-openai': patch +'@openai/agents-core': patch +--- + +feat: Add reasoning.effort: none parameter for gpt-5.1 diff --git a/examples/basic/hello-world-gpt-5.1.ts b/examples/basic/hello-world-gpt-5.1.ts new file mode 100644 index 00000000..b1016cf5 --- /dev/null +++ b/examples/basic/hello-world-gpt-5.1.ts @@ -0,0 +1,50 @@ +import { + Agent, + OpenAIChatCompletionsModel, + run, + withTrace, +} from '@openai/agents'; +import OpenAI from 'openai'; +import { z } from 'zod'; + +const output = z.object({ + title: z.string(), + description: z.string(), +}); + +async function main() { + await withTrace('GPT-5.1 None Reasoning Assistant', async () => { + const prompt = + 'Tell me about recursion in programming in a few sentences. Quickly responding with a single answer is fine.'; + const agent = new Agent({ + name: 'GPT-5.1 Responses Assistant', + model: 'gpt-5.1', + instructions: "You're a helpful assistant.", + modelSettings: { + reasoning: { effort: 'none' }, + text: { verbosity: 'low' }, + }, + outputType: output, + }); + const result = await run(agent, prompt); + console.log(result.finalOutput); + + const completionsAgent = new Agent({ + name: 'GPT-5.1 Chat Completions Assistant', + model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5.1'), + instructions: "You're a helpful assistant.", + modelSettings: { + reasoning: { effort: 'none' }, + text: { verbosity: 'low' }, + }, + outputType: output, + }); + const completionsResult = await run(completionsAgent, prompt); + console.log(completionsResult.finalOutput); + }); +} + +main().catch((error) => { + console.error(error); + process.exit(1); +}); diff --git a/examples/basic/hello-world-gpt-5.ts b/examples/basic/hello-world-gpt-5.ts index ab92b50f..5d27fcce 100644 --- a/examples/basic/hello-world-gpt-5.ts +++ 
b/examples/basic/hello-world-gpt-5.ts @@ -1,4 +1,9 @@ -import { Agent, OpenAIChatCompletionsModel, run } from '@openai/agents'; +import { + Agent, + OpenAIChatCompletionsModel, + run, + withTrace, +} from '@openai/agents'; import OpenAI from 'openai'; import { z } from 'zod'; @@ -8,59 +13,61 @@ const output = z.object({ }); async function main() { - const prompt = - 'Tell me about recursion in programming. Quickly responding with a single answer is fine.'; + await withTrace('GPT-5 Assistant', async () => { + const prompt = + 'Tell me about recursion in programming in a few sentences. Quickly responding with a single answer is fine.'; - const agent = new Agent({ - name: 'GPT-5 Assistant', - model: 'gpt-5', - instructions: "You're a helpful assistant.", - modelSettings: { - reasoning: { effort: 'minimal' }, - text: { verbosity: 'low' }, - }, - outputType: output, - }); + const agent = new Agent({ + name: 'GPT-5 Assistant', + model: 'gpt-5', + instructions: "You're a helpful assistant.", + modelSettings: { + reasoning: { effort: 'minimal' }, + text: { verbosity: 'low' }, + }, + outputType: output, + }); - const result = await run(agent, prompt); - console.log(result.finalOutput); + const result = await run(agent, prompt); + console.log(result.finalOutput); - // The following code works in the same way: - // const agent2 = agent.clone({ - // modelSettings: { - // providerData: { - // reasoning: { effort: 'minimal' }, - // text: { verbosity: 'low' }, - // } - // }, - // }); - // const result2 = await run(agent2, prompt); - // console.log(result2.finalOutput); + // The following code works in the same way: + // const agent2 = agent.clone({ + // modelSettings: { + // providerData: { + // reasoning: { effort: 'minimal' }, + // text: { verbosity: 'low' }, + // } + // }, + // }); + // const result2 = await run(agent2, prompt); + // console.log(result2.finalOutput); - const completionsAgent = new Agent({ - name: 'GPT-5 Assistant', - model: new OpenAIChatCompletionsModel(new 
OpenAI(), 'gpt-5'), - instructions: "You're a helpful assistant.", - modelSettings: { - reasoning: { effort: 'minimal' }, - text: { verbosity: 'low' }, - }, - outputType: output, - }); - const completionsResult = await run(completionsAgent, prompt); - console.log(completionsResult.finalOutput); + const completionsAgent = new Agent({ + name: 'GPT-5 Assistant', + model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5'), + instructions: "You're a helpful assistant.", + modelSettings: { + reasoning: { effort: 'minimal' }, + text: { verbosity: 'low' }, + }, + outputType: output, + }); + const completionsResult = await run(completionsAgent, prompt); + console.log(completionsResult.finalOutput); - // The following code works in the same way: - // const completionsAgent2 = completionsAgent.clone({ - // modelSettings: { - // providerData: { - // reasoning_effort: 'minimal', - // verbosity: 'low', - // } - // }, - // }); - // const completionsResult2 = await run(completionsAgent2, prompt); - // console.log(completionsResult2.finalOutput); + // The following code works in the same way: + // const completionsAgent2 = completionsAgent.clone({ + // modelSettings: { + // providerData: { + // reasoning_effort: 'minimal', + // verbosity: 'low', + // } + // }, + // }); + // const completionsResult2 = await run(completionsAgent2, prompt); + // console.log(completionsResult2.finalOutput); + }); } main().catch((error) => { diff --git a/examples/basic/package.json b/examples/basic/package.json index 9417c1de..79ad6a28 100644 --- a/examples/basic/package.json +++ b/examples/basic/package.json @@ -12,6 +12,7 @@ "start:dynamic-system-prompt": "tsx dynamic-system-prompt.ts", "start:hello-world": "tsx hello-world.ts", "start:hello-world-gpt-5": "tsx hello-world-gpt-5.ts", + "start:hello-world-gpt-5.1": "tsx hello-world-gpt-5.1.ts", "start:hello-world-gpt-oss": "tsx hello-world-gpt-oss.ts", "start:lifecycle-example": "tsx lifecycle-example.ts", "start:local-image": "tsx local-image.ts", 
diff --git a/packages/agents-core/src/model.ts b/packages/agents-core/src/model.ts index 97ddb6f3..05d28b06 100644 --- a/packages/agents-core/src/model.ts +++ b/packages/agents-core/src/model.ts @@ -30,7 +30,8 @@ export type ModelSettingsToolChoice = * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. */ export type ModelSettingsReasoningEffort = - | 'minimal' + | 'none' // for gpt-5.1 + | 'minimal' // for gpt-5 | 'low' | 'medium' | 'high' diff --git a/packages/agents-openai/test/openaiChatCompletionsModel.test.ts b/packages/agents-openai/test/openaiChatCompletionsModel.test.ts index fbbb10fe..0b527eb1 100644 --- a/packages/agents-openai/test/openaiChatCompletionsModel.test.ts +++ b/packages/agents-openai/test/openaiChatCompletionsModel.test.ts @@ -283,6 +283,35 @@ describe('OpenAIChatCompletionsModel', () => { expect(options).toEqual({ headers: HEADERS, signal: undefined }); }); + it('passes none reasoning effort through to chat completions payloads', async () => { + const client = new FakeClient(); + const response = { + id: 'gpt-5.1-response', + choices: [{ message: { content: 'done' } }], + usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }, + } as any; + client.chat.completions.create.mockResolvedValue(response); + + const model = new OpenAIChatCompletionsModel(client as any, 'gpt-5.1'); + const req: any = { + input: 'prompt', + modelSettings: { + reasoning: { effort: 'none' }, + }, + tools: [], + outputType: 'text', + handoffs: [], + tracing: false, + }; + + await withTrace('gpt-5.1 none', () => model.getResponse(req)); + + expect(client.chat.completions.create).toHaveBeenCalledTimes(1); + const [args, options] = client.chat.completions.create.mock.calls[0]; + expect(args.reasoning_effort).toBe('none'); + expect(options).toEqual({ headers: HEADERS, signal: undefined }); + }); + it('handles function tool calls', async () => { const client = new FakeClient(); const response = { diff --git 
a/packages/agents-openai/test/openaiResponsesModel.test.ts b/packages/agents-openai/test/openaiResponsesModel.test.ts index bc6beb82..b44a60e8 100644 --- a/packages/agents-openai/test/openaiResponsesModel.test.ts +++ b/packages/agents-openai/test/openaiResponsesModel.test.ts @@ -253,6 +253,43 @@ describe('OpenAIResponsesModel', () => { }); }); + it('passes none reasoning effort to the Responses API payload', async () => { + await withTrace('test', async () => { + const fakeResponse = { + id: 'res-none', + usage: { + input_tokens: 1, + output_tokens: 1, + total_tokens: 2, + }, + output: [], + }; + const createMock = vi.fn().mockResolvedValue(fakeResponse); + const fakeClient = { + responses: { create: createMock }, + } as unknown as OpenAI; + const model = new OpenAIResponsesModel(fakeClient, 'gpt-5.1'); + const request = { + systemInstructions: undefined, + input: 'hi', + modelSettings: { + reasoning: { effort: 'none' }, + }, + tools: [], + outputType: 'text', + handoffs: [], + tracing: false, + signal: undefined, + }; + + await model.getResponse(request as any); + + expect(createMock).toHaveBeenCalledTimes(1); + const [args] = createMock.mock.calls[0]; + expect(args.reasoning).toEqual({ effort: 'none' }); + }); + }); + it('getStreamedResponse yields events and calls client with stream flag', async () => { await withTrace('test', async () => { const fakeResponse = { id: 'res2', usage: {}, output: [] };