From 2a23cb7b24f9842b9a528564cbd748a4d00b0874 Mon Sep 17 00:00:00 2001
From: myan
Date: Sat, 26 Apr 2025 13:55:34 +0800
Subject: [PATCH 1/4] rollback

Signed-off-by: myan
---
 examples/basic/agent_lifecycle_example.py |  6 ++---
 examples/basic/lifecycle_example.py       |  6 ++---
 src/agents/__init__.py                    |  2 ++
 src/agents/_run_impl.py                   | 32 +++++++----------------
 src/agents/lifecycle.py                   | 10 +++----
 src/agents/tool.py                        | 14 ++++++++++
 tests/test_agent_hooks.py                 |  4 +--
 tests/test_computer_action.py             | 12 +++++----
 8 files changed, 46 insertions(+), 40 deletions(-)

diff --git a/examples/basic/agent_lifecycle_example.py b/examples/basic/agent_lifecycle_example.py
index b4334a83b..671a69916 100644
--- a/examples/basic/agent_lifecycle_example.py
+++ b/examples/basic/agent_lifecycle_example.py
@@ -4,7 +4,7 @@
 
 from pydantic import BaseModel
 
-from agents import Agent, AgentHooks, RunContextWrapper, Runner, Tool, function_tool
+from agents import Agent, AgentHooks, RunContextWrapper, Runner, Tool, Action, function_tool
 
 
 class CustomAgentHooks(AgentHooks):
@@ -28,10 +28,10 @@ async def on_handoff(self, context: RunContextWrapper, agent: Agent, source: Age
             f"### ({self.display_name}) {self.event_counter}: Agent {source.name} handed off to {agent.name}"
         )
 
-    async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: Tool) -> None:
+    async def on_tool_start(self, context: RunContextWrapper, agent: Agent, action: Action) -> None:
         self.event_counter += 1
         print(
-            f"### ({self.display_name}) {self.event_counter}: Agent {agent.name} started tool {tool.name}"
+            f"### ({self.display_name}) {self.event_counter}: Agent {agent.name} started tool {action.function_tool.name} with arguments {action.tool_call.arguments}"
         )
 
     async def on_tool_end(
diff --git a/examples/basic/lifecycle_example.py b/examples/basic/lifecycle_example.py
index 02ce449f4..706402d4d 100644
--- a/examples/basic/lifecycle_example.py
+++ b/examples/basic/lifecycle_example.py
@@ -4,7 +4,7 @@
 
 from pydantic import BaseModel
 
-from agents import Agent, RunContextWrapper, RunHooks, Runner, Tool, Usage, function_tool
+from agents import Agent, RunContextWrapper, RunHooks, Runner, Tool, Usage, function_tool, Action
 
 
 class ExampleHooks(RunHooks):
@@ -26,10 +26,10 @@ async def on_agent_end(self, context: RunContextWrapper, agent: Agent, output: A
             f"### {self.event_counter}: Agent {agent.name} ended with output {output}. Usage: {self._usage_to_str(context.usage)}"
         )
 
-    async def on_tool_start(self, context: RunContextWrapper, agent: Agent, tool: Tool) -> None:
+    async def on_tool_start(self, context: RunContextWrapper, agent: Agent, action: Action) -> None:
         self.event_counter += 1
         print(
-            f"### {self.event_counter}: Tool {tool.name} started. Usage: {self._usage_to_str(context.usage)}"
+            f"### {self.event_counter}: Tool {action.function_tool.name} started. Usage: {self._usage_to_str(context.usage)}"
         )
 
     async def on_tool_end(
diff --git a/src/agents/__init__.py b/src/agents/__init__.py
index 7de17efdb..e994c548a 100644
--- a/src/agents/__init__.py
+++ b/src/agents/__init__.py
@@ -75,6 +75,7 @@
     WebSearchTool,
     default_tool_error_function,
     function_tool,
+    Action,
 )
 from .tracing import (
     AgentSpanData,
@@ -239,6 +240,7 @@ def enable_verbose_stdout_logging():
     "MCPToolApprovalRequest",
     "MCPToolApprovalFunctionResult",
     "function_tool",
+    "Action",
     "Usage",
     "add_trace_processor",
     "agent_span",
diff --git a/src/agents/_run_impl.py b/src/agents/_run_impl.py
index a83af62a1..a7dd0a851 100644
--- a/src/agents/_run_impl.py
+++ b/src/agents/_run_impl.py
@@ -78,6 +78,8 @@
     LocalShellTool,
     MCPToolApprovalRequest,
     Tool,
+    ToolRunFunction,
+    ToolRunComputerAction,
 )
 from .tool_context import ToolContext
 from .tracing import (
@@ -126,19 +128,6 @@ class ToolRunHandoff:
     handoff: Handoff
     tool_call: ResponseFunctionToolCall
-
-@dataclass
-class ToolRunFunction:
-    tool_call: ResponseFunctionToolCall
-    function_tool: FunctionTool
-
-
-@dataclass
-class ToolRunComputerAction:
-    tool_call: ResponseComputerToolCall
-    computer_tool: ComputerTool
-
-
 @dataclass
 class ToolRunMCPApprovalRequest:
     request_item: McpApprovalRequest
@@ -544,9 +533,9 @@ async def execute_function_tool_calls(
         context_wrapper: RunContextWrapper[TContext],
         config: RunConfig,
     ) -> list[FunctionToolResult]:
-        async def run_single_tool(
-            func_tool: FunctionTool, tool_call: ResponseFunctionToolCall
-        ) -> Any:
+        async def run_single_tool(action: ToolRunFunction) -> Any:
+            func_tool = action.function_tool
+            tool_call = action.tool_call
             with function_span(func_tool.name) as span_fn:
                 tool_context = ToolContext.from_agent_context(
                     context_wrapper,
@@ -557,9 +546,9 @@
                 span_fn.span_data.input = tool_call.arguments
             try:
                 _, _, result = await asyncio.gather(
-                    hooks.on_tool_start(tool_context, agent, func_tool),
+                    hooks.on_tool_start(context_wrapper, agent, action),
                     (
-                        agent.hooks.on_tool_start(tool_context, agent, func_tool)
+                        agent.hooks.on_tool_start(context_wrapper, agent, action)
                         if agent.hooks
                         else _coro.noop_coroutine()
                     ),
@@ -591,8 +580,7 @@
         tasks = []
         for tool_run in tool_runs:
-            function_tool = tool_run.function_tool
-            tasks.append(run_single_tool(function_tool, tool_run.tool_call))
+            tasks.append(run_single_tool(tool_run))
 
         results = await asyncio.gather(*tasks)
@@ -1039,9 +1027,9 @@ async def execute(
         )
 
         _, _, output = await asyncio.gather(
-            hooks.on_tool_start(context_wrapper, agent, action.computer_tool),
+            hooks.on_tool_start(context_wrapper, agent, action),
             (
-                agent.hooks.on_tool_start(context_wrapper, agent, action.computer_tool)
+                agent.hooks.on_tool_start(context_wrapper, agent, action)
                 if agent.hooks
                 else _coro.noop_coroutine()
             ),
diff --git a/src/agents/lifecycle.py b/src/agents/lifecycle.py
index 2cce496c8..0c75f41c3 100644
--- a/src/agents/lifecycle.py
+++ b/src/agents/lifecycle.py
@@ -4,7 +4,7 @@
 from .agent import Agent, AgentBase
 from .run_context import RunContextWrapper, TContext
-from .tool import Tool
+from .tool import Tool, Action
 
 TAgent = TypeVar("TAgent", bound=AgentBase, default=AgentBase)
@@ -39,8 +39,8 @@ async def on_handoff(
     async def on_tool_start(
         self,
         context: RunContextWrapper[TContext],
-        agent: TAgent,
-        tool: Tool,
+        agent: Agent[TContext],
+        action: Action,
     ) -> None:
         """Called before a tool is invoked."""
         pass
@@ -90,8 +90,8 @@ async def on_handoff(
     async def on_tool_start(
         self,
         context: RunContextWrapper[TContext],
-        agent: TAgent,
-        tool: Tool,
+        agent: Agent[TContext],
+        action: Action,
     ) -> None:
         """Called before a tool is invoked."""
         pass
diff --git a/src/agents/tool.py b/src/agents/tool.py
index b967e899b..2018ec080 100644
--- a/src/agents/tool.py
+++ b/src/agents/tool.py
@@ -16,6 +16,7 @@
 from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
 from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
+from openai.types.responses import ResponseComputerToolCall, ResponseFunctionToolCall
 
 from . import _debug
 from .computer import AsyncComputer, Computer
@@ -281,6 +282,19 @@ def name(self):
 ]
 """A tool that can be used in an agent."""
 
+@dataclass
+class ToolRunFunction:
+    tool_call: ResponseFunctionToolCall
+    function_tool: FunctionTool
+
+
+@dataclass
+class ToolRunComputerAction:
+    tool_call: ResponseComputerToolCall
+    computer_tool: ComputerTool
+
+Action = Union[ToolRunFunction, ToolRunComputerAction]
+"""An action that can be performed by an agent. It contains the tool call and the tool."""
 
 def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:
     """The default tool error function, which just returns a generic error message."""
diff --git a/tests/test_agent_hooks.py b/tests/test_agent_hooks.py
index a6c302dc8..63e0177cd 100644
--- a/tests/test_agent_hooks.py
+++ b/tests/test_agent_hooks.py
@@ -7,7 +7,7 @@
 import pytest
 from typing_extensions import TypedDict
 
-from agents.agent import Agent
+from agents import Action, Agent
 from agents.lifecycle import AgentHooks
 from agents.run import Runner
 from agents.run_context import RunContextWrapper, TContext
@@ -53,7 +53,7 @@ async def on_tool_start(
         self,
         context: RunContextWrapper[TContext],
         agent: Agent[TContext],
-        tool: Tool,
+        action: Action,
     ) -> None:
         self.events["on_tool_start"] += 1
 
diff --git a/tests/test_computer_action.py b/tests/test_computer_action.py
index a306b1841..5dd6a4dcb 100644
--- a/tests/test_computer_action.py
+++ b/tests/test_computer_action.py
@@ -24,6 +24,7 @@
 from agents import (
     Agent,
+    Action,
     AgentHooks,
     AsyncComputer,
     Computer,
@@ -32,7 +33,8 @@
     RunContextWrapper,
     RunHooks,
 )
-from agents._run_impl import ComputerAction, RunImpl, ToolRunComputerAction
+from agents._run_impl import ComputerAction, RunImpl
+from agents.tool import ToolRunComputerAction
 from agents.items import ToolCallOutputItem
 from agents.tool import ComputerToolSafetyCheckData
@@ -224,9 +226,9 @@ def __init__(self) -> None:
         self.ended: list[tuple[Agent[Any], Any, str]] = []
 
     async def on_tool_start(
-        self, context: RunContextWrapper[Any], agent: Agent[Any], tool: Any
+        self, context: RunContextWrapper[Any], agent: Agent[Any], action: Action,
     ) -> None:
-        self.started.append((agent, tool))
+        self.started.append((agent, action.computer_tool))
 
     async def on_tool_end(
         self, context: RunContextWrapper[Any], agent: Agent[Any], tool: Any, result: str
@@ -243,9 +245,9 @@ def __init__(self) -> None:
         self.ended: list[tuple[Agent[Any], Any, str]] = []
 
     async def on_tool_start(
-        self, context: RunContextWrapper[Any], agent: Agent[Any], tool: Any
+        self, context: RunContextWrapper[Any], agent: Agent[Any], action: Action,
     ) -> None:
-        self.started.append((agent, tool))
+        self.started.append((agent, action.computer_tool))
 
     async def on_tool_end(
         self, context: RunContextWrapper[Any], agent: Agent[Any], tool: Any, result: str

From ee80bd5bada883b0bf9bf4f33a0637de917ff088 Mon Sep 17 00:00:00 2001
From: Meng Yan
Date: Wed, 16 Jul 2025 10:20:32 +0800
Subject: [PATCH 2/4] fix: resolve lint issues
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Fix import sorting in multiple files
- Remove duplicate ResponseComputerToolCall import
- Organize imports according to ruff rules

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude
---
 .vscode/PythonImportHelper-v2-Completion.json | 22635 ++++++++++++++++
 examples/basic/agent_lifecycle_example.py     |     2 +-
 examples/basic/lifecycle_example.py           |     2 +-
 src/agents/__init__.py                        |     2 +-
 src/agents/_run_impl.py                       |     2 +-
 src/agents/lifecycle.py                       |     2 +-
 src/agents/tool.py                            |     2 +-
 tests/test_agent_hooks.py                     |     2 +-
 tests/test_computer_action.py                 |     5 +-
 9 files changed, 22644 insertions(+), 10 deletions(-)
 create mode 100644 .vscode/PythonImportHelper-v2-Completion.json

diff --git a/.vscode/PythonImportHelper-v2-Completion.json b/.vscode/PythonImportHelper-v2-Completion.json
new file mode 100644
index 000000000..49b8882f6
"description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "trace", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ImageGenerationTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "trace", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "WebSearchTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "trace", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunResultStreaming", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + 
"description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "FunctionTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OpenAIResponsesModel", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Model", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelSettings", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AgentOutputSchema", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + 
"importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Prompt", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "HandoffInputData", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelSettings", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OutputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OutputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + 
"isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "HandoffInputData", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OutputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OutputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "trace", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Action", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AgentHooks", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AsyncComputer", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + 
"label": "Computer", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ComputerTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunHooks", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "set_default_openai_api", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "set_default_openai_client", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "set_default_openai_key", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "HandoffInputData", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelSettings", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OpenAIResponsesModel", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AgentBase", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "FunctionTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + 
"importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunHooks", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TContext", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OutputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "HandoffInputData", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "MessageOutputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ItemHelpers", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + 
"detail": "agents", + "documentation": {} + }, + { + "label": "MessageOutputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ReasoningItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "MaxTurnsExceeded", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelSettings", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OpenAIProvider", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "generation_span", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AgentOutputSchema", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Computer", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ComputerTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "FileSearchTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + 
"importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "WebSearchTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AgentOutputSchema", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "AgentOutputSchemaBase", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "run_demo_loop", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "FunctionTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "default_tool_error_function", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelSettings", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OpenAIResponsesModel", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "trace", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + 
"detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunResult", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "MaxTurnsExceeded", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunErrorDetails", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "MessageOutputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunHooks", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ToolCallItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ToolCallOutputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "agents", + "description": 
"agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Computer", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ComputerTool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ReasoningItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ToolCallItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "SQLiteSession", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelSettings", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "handoff", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "FunctionToolResult", + 
"importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ToolCallOutputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ToolsToFinalOutputResult", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "MaxTurnsExceeded", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "InputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "MaxTurnsExceeded", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "OutputGuardrail", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": 
"OutputGuardrailTripwireTriggered", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents", + "description": "agents", + "isExtraImport": true, + "detail": "agents", + "documentation": {} + }, + { + "label": "pydantic", + "kind": 6, + "isExtraImport": true, + "importPath": "pydantic", + "description": "pydantic", + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "Field", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "Field", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": 
"BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "ConfigDict", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "Field", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "TypeAdapter", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "TypeAdapter", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "ValidationError", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "TypeAdapter", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "Field", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "create_model", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "TypeAdapter", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "GetCoreSchemaHandler", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "ValidationError", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "TypeAdapter", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + }, + { + "label": "BaseModel", + "importPath": "pydantic", + "description": "pydantic", + "isExtraImport": true, + "detail": "pydantic", + "documentation": {} + 
+    },
+    {
+      "label": "TypeAdapter",
+      "importPath": "pydantic",
+      "description": "pydantic",
+      "isExtraImport": true,
+      "detail": "pydantic",
+      "documentation": {}
+    },
  [... many more auto-generated import-completion entries of the same shape, heavily duplicated, covering BaseModel/Field/ValidationError (pydantic); Any/Literal/Union/Optional/Callable/Generic/TypeVar/Annotated/Protocol/runtime_checkable/cast/overload/TYPE_CHECKING/get_args/get_origin/get_type_hints (typing); dataclass/field/fields/replace (dataclasses); json; uuid; random; base64; time; Sequence/Mapping/Iterable/Coroutine/AsyncIterator/Awaitable (collections.abc); the Response* request/stream/output types (openai.types.responses); RECOMMENDED_PROMPT_PREFIX and prompt_with_handoff_instructions (agents.extensions.handoff_prompt); and ModelSettings/MCPToolChoice (agents.model_settings) ...]
+    {
+      "label": "Mapping",
+      "importPath":
"collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "Awaitable", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "Mapping", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "AsyncIterator", + "importPath": "collections.abc", + "description": "collections.abc", + "isExtraImport": true, + "detail": "collections.abc", + "documentation": {} + }, + { + "label": "Console", + "importPath": "rich.console", + "description": "rich.console", + "isExtraImport": true, + "detail": "rich.console", + "documentation": {} + }, + { + "label": "Console", + "importPath": "rich.console", + "description": "rich.console", + "isExtraImport": true, + "detail": "rich.console", + "documentation": {} + }, + { + "label": "Group", + "importPath": "rich.console", + "description": "rich.console", + "isExtraImport": true, + "detail": "rich.console", + "documentation": {} + }, + { + "label": "Console", + "importPath": "rich.console", + "description": "rich.console", + "isExtraImport": true, + "detail": "rich.console", + "documentation": {} + }, + { + "label": "Console", + "importPath": "rich.console", + "description": "rich.console", + "isExtraImport": true, + "detail": "rich.console", + "documentation": {} + }, + { + "label": "Group", + "importPath": "rich.console", + "description": "rich.console", + "isExtraImport": true, + "detail": "rich.console", + "documentation": {} + }, + { + "label": "Live", + "importPath": "rich.live", + "description": "rich.live", + "isExtraImport": true, + "detail": "rich.live", + "documentation": {} + }, + { + "label": "Live", + "importPath": "rich.live", + "description": "rich.live", + "isExtraImport": true, + "detail": "rich.live", + "documentation": {} + }, + { + "label": "Spinner", + 
"importPath": "rich.spinner", + "description": "rich.spinner", + "isExtraImport": true, + "detail": "rich.spinner", + "documentation": {} + }, + { + "label": "Spinner", + "importPath": "rich.spinner", + "description": "rich.spinner", + "isExtraImport": true, + "detail": "rich.spinner", + "documentation": {} + }, + { + "label": "handoff_filters", + "importPath": "agents.extensions", + "description": "agents.extensions", + "isExtraImport": true, + "detail": "agents.extensions", + "documentation": {} + }, + { + "label": "handoff_filters", + "importPath": "agents.extensions", + "description": "agents.extensions", + "isExtraImport": true, + "detail": "agents.extensions", + "documentation": {} + }, + { + "label": "shutil", + "kind": 6, + "isExtraImport": true, + "importPath": "shutil", + "description": "shutil", + "detail": "shutil", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerStdio", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerStdio", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerStreamableHttp", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerSse", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerStreamableHttp", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerStdio", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServerStdio", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPUtil", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "MCPServer", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": 
"agents.mcp", + "documentation": {} + }, + { + "label": "ToolFilterContext", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "create_static_tool_filter", + "importPath": "agents.mcp", + "description": "agents.mcp", + "isExtraImport": true, + "detail": "agents.mcp", + "documentation": {} + }, + { + "label": "subprocess", + "kind": 6, + "isExtraImport": true, + "importPath": "subprocess", + "description": "subprocess", + "detail": "subprocess", + "documentation": {} + }, + { + "label": "FastMCP", + "importPath": "mcp.server.fastmcp", + "description": "mcp.server.fastmcp", + "isExtraImport": true, + "detail": "mcp.server.fastmcp", + "documentation": {} + }, + { + "label": "FastMCP", + "importPath": "mcp.server.fastmcp", + "description": "mcp.server.fastmcp", + "isExtraImport": true, + "detail": "mcp.server.fastmcp", + "documentation": {} + }, + { + "label": "FastMCP", + "importPath": "mcp.server.fastmcp", + "description": "mcp.server.fastmcp", + "isExtraImport": true, + "detail": "mcp.server.fastmcp", + "documentation": {} + }, + { + "label": "requests", + "kind": 6, + "isExtraImport": true, + "importPath": "requests", + "description": "requests", + "detail": "requests", + "documentation": {} + }, + { + "label": "LitellmModel", + "importPath": "agents.extensions.models.litellm_model", + "description": "agents.extensions.models.litellm_model", + "isExtraImport": true, + "detail": "agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "LitellmModel", + "importPath": "agents.extensions.models.litellm_model", + "description": "agents.extensions.models.litellm_model", + "isExtraImport": true, + "detail": "agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "LitellmModel", + "importPath": "agents.extensions.models.litellm_model", + "description": "agents.extensions.models.litellm_model", + "isExtraImport": true, + "detail": "agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "LitellmModel", + "importPath": "agents.extensions.models.litellm_model", + "description": "agents.extensions.models.litellm_model", + "isExtraImport": true, + "detail": "agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "LitellmModel", + "importPath": "agents.extensions.models.litellm_model", + "description": "agents.extensions.models.litellm_model", + "isExtraImport": true, + "detail": "agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "numpy", + "kind": 6, + "isExtraImport": true, + "importPath": "numpy", + "description": "numpy", + "detail": "numpy", + "documentation": {} + }, + { + "label": "RealtimeSession", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeAgent", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeRunner", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeSessionEvent", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeAgent", + "importPath": 
"agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeRunner", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeSession", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "RealtimeSessionEvent", + "importPath": "agents.realtime", + "description": "agents.realtime", + "isExtraImport": true, + "detail": "agents.realtime", + "documentation": {} + }, + { + "label": "sounddevice", + "kind": 6, + "isExtraImport": true, + "importPath": "sounddevice", + "description": "sounddevice", + "detail": "sounddevice", + "documentation": {} + }, + { + "label": "numpy.typing", + "kind": 6, + "isExtraImport": true, + "importPath": "numpy.typing", + "description": "numpy.typing", + "detail": "numpy.typing", + "documentation": {} + }, + { + "label": "events", + "importPath": "textual", + "description": "textual", + "isExtraImport": true, + "detail": "textual", + "documentation": {} + }, + { + "label": "events", + "importPath": "textual", + "description": "textual", + "isExtraImport": true, + "detail": "textual", + "documentation": {} + }, + { + "label": "App", + "importPath": "textual.app", + "description": "textual.app", + "isExtraImport": true, + "detail": "textual.app", + "documentation": {} + }, + { + "label": "ComposeResult", + "importPath": "textual.app", + "description": "textual.app", + "isExtraImport": true, + "detail": "textual.app", + "documentation": {} + }, + { + "label": "App", + "importPath": "textual.app", + "description": "textual.app", + "isExtraImport": true, + "detail": "textual.app", + "documentation": {} + }, + { + "label": "ComposeResult", + "importPath": "textual.app", + "description": "textual.app", + "isExtraImport": true, + "detail": "textual.app", + "documentation": {} + }, + { + "label": "Container", + "importPath": "textual.containers", + "description": "textual.containers", + "isExtraImport": true, + "detail": "textual.containers", + "documentation": {} + }, + { + "label": "Horizontal", + "importPath": "textual.containers", + "description": "textual.containers", + "isExtraImport": true, + "detail": "textual.containers", + "documentation": {} + }, + { + "label": "Container", + "importPath": "textual.containers", + "description": "textual.containers", + "isExtraImport": true, + "detail": "textual.containers", + "documentation": {} + }, + { + "label": "reactive", + "importPath": "textual.reactive", + "description": "textual.reactive", + "isExtraImport": true, + "detail": "textual.reactive", + "documentation": {} + }, + { + "label": "reactive", + "importPath": "textual.reactive", + "description": "textual.reactive", + "isExtraImport": true, + "detail": "textual.reactive", + "documentation": {} + }, + { + "label": "RichLog", + "importPath": "textual.widgets", + "description": "textual.widgets", + "isExtraImport": true, + "detail": "textual.widgets", + "documentation": {} + }, + { + "label": "Static", + "importPath": "textual.widgets", + "description": "textual.widgets", + "isExtraImport": true, + "detail": "textual.widgets", + "documentation": {} + }, + { + "label": "Button", + "importPath": "textual.widgets", + "description": "textual.widgets", + "isExtraImport": true, + "detail": "textual.widgets", + "documentation": {} + }, + { + 
"label": "RichLog", + "importPath": "textual.widgets", + "description": "textual.widgets", + "isExtraImport": true, + "detail": "textual.widgets", + "documentation": {} + }, + { + "label": "Static", + "importPath": "textual.widgets", + "description": "textual.widgets", + "isExtraImport": true, + "detail": "textual.widgets", + "documentation": {} + }, + { + "label": "override", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "override", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "assert_never", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} 
+ }, + { + "label": "assert_never", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "Literal", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "get_args", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "get_origin", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + 
"documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "Unpack", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeVar", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeAlias", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypeGuard", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "Concatenate", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "NotRequired", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "ParamSpec", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": 
"typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "TypedDict", + "importPath": "typing_extensions", + "description": "typing_extensions", + "isExtraImport": true, + "detail": "typing_extensions", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "Model", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelTracing", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "Model", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "ModelProvider", + "importPath": "agents.models.interface", + "description": "agents.models.interface", + "isExtraImport": true, + "detail": "agents.models.interface", + "documentation": {} + }, + { + "label": "OpenAIProvider", + "importPath": "agents.models.openai_provider", + "description": "agents.models.openai_provider", + "isExtraImport": true, + "detail": "agents.models.openai_provider", + "documentation": {} + }, + { + "label": "OpenAIProvider", + "importPath": "agents.models.openai_provider", + "description": "agents.models.openai_provider", + "isExtraImport": true, + "detail": "agents.models.openai_provider", + "documentation": {} + }, + { + "label": "OpenAIProvider", + "importPath": "agents.models.openai_provider", + "description": "agents.models.openai_provider", + "isExtraImport": true, + "detail": "agents.models.openai_provider", + "documentation": {} + }, + { + "label": "OpenAIProvider", + "importPath": "agents.models.openai_provider", + "description": "agents.models.openai_provider", + "isExtraImport": true, + "detail": 
"agents.models.openai_provider", + "documentation": {} + }, + { + "label": "ResponseOutputRefusal", + "importPath": "agents.types", + "description": "agents.types", + "isExtraImport": true, + "detail": "agents.types", + "documentation": {} + }, + { + "label": "ResponseOutputText", + "importPath": "agents.types", + "description": "agents.types", + "isExtraImport": true, + "detail": "agents.types", + "documentation": {} + }, + { + "label": "ReasoningItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseOutputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseStreamEvent", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseOutputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseStreamEvent", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "RunItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "ToolCallOutputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "HandoffOutputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "MessageOutputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "ToolCallOutputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "importPath": "agents.items", + "description": "agents.items", + "isExtraImport": true, + "detail": "agents.items", + "documentation": {} + }, + { + "label": "Browser", + "importPath": "playwright.async_api", + "description": "playwright.async_api", + "isExtraImport": true, + "detail": "playwright.async_api", + "documentation": {} + }, + { + "label": "Page", + "importPath": 
"playwright.async_api", + "description": "playwright.async_api", + "isExtraImport": true, + "detail": "playwright.async_api", + "documentation": {} + }, + { + "label": "Playwright", + "importPath": "playwright.async_api", + "description": "playwright.async_api", + "isExtraImport": true, + "detail": "playwright.async_api", + "documentation": {} + }, + { + "label": "async_playwright", + "importPath": "playwright.async_api", + "description": "playwright.async_api", + "isExtraImport": true, + "detail": "playwright.async_api", + "documentation": {} + }, + { + "label": "tempfile", + "kind": 6, + "isExtraImport": true, + "importPath": "tempfile", + "description": "tempfile", + "detail": "tempfile", + "documentation": {} + }, + { + "label": "AudioInput", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "SingleAgentVoiceWorkflow", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "SingleAgentWorkflowCallbacks", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "VoicePipeline", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "StreamedAudioInput", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "VoicePipeline", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "VoiceWorkflowBase", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "VoiceWorkflowHelper", + "importPath": "agents.voice", + "description": "agents.voice", + "isExtraImport": true, + "detail": "agents.voice", + "documentation": {} + }, + { + "label": "curses", + "kind": 6, + "isExtraImport": true, + "importPath": "curses", + "description": "curses", + "detail": "curses", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, 
+ "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "InputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "OutputTokensDetails", + "importPath": "openai.types.responses.response_usage", + "description": "openai.types.responses.response_usage", + "isExtraImport": true, + "detail": "openai.types.responses.response_usage", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "AgentsException", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": 
"agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "importPath": "agents.exceptions", + "description": "agents.exceptions", + "isExtraImport": true, + "detail": "agents.exceptions", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessageToolCall", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionAssistantMessageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionContentPartImageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionContentPartParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionContentPartTextParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionDeveloperMessageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessageToolCallParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionSystemMessageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + 
"documentation": {} + }, + { + "label": "ChatCompletionToolChoiceOptionParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionToolMessageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionUserMessageParam", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletion", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessageToolCall", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletion", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat", + "description": "openai.types.chat", + "isExtraImport": true, + "detail": "openai.types.chat", + "documentation": {} + }, + { + "label": "Annotation", + "importPath": "openai.types.chat.chat_completion_message", + "description": "openai.types.chat.chat_completion_message", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message", + "documentation": {} + }, + { + "label": "AnnotationURLCitation", + "importPath": "openai.types.chat.chat_completion_message", + "description": "openai.types.chat.chat_completion_message", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat.chat_completion_message", + "description": "openai.types.chat.chat_completion_message", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat.chat_completion_message", + "description": "openai.types.chat.chat_completion_message", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message", + "documentation": {} + }, + { + "label": 
"ChatCompletionMessage", + "importPath": "openai.types.chat.chat_completion_message", + "description": "openai.types.chat.chat_completion_message", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message", + "documentation": {} + }, + { + "label": "ChatCompletionMessage", + "importPath": "openai.types.chat.chat_completion_message", + "description": "openai.types.chat.chat_completion_message", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message", + "documentation": {} + }, + { + "label": "Function", + "importPath": "openai.types.chat.chat_completion_message_tool_call", + "description": "openai.types.chat.chat_completion_message_tool_call", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message_tool_call", + "documentation": {} + }, + { + "label": "ChatCompletionMessageToolCall", + "importPath": "openai.types.chat.chat_completion_message_tool_call", + "description": "openai.types.chat.chat_completion_message_tool_call", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message_tool_call", + "documentation": {} + }, + { + "label": "Function", + "importPath": "openai.types.chat.chat_completion_message_tool_call", + "description": "openai.types.chat.chat_completion_message_tool_call", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message_tool_call", + "documentation": {} + }, + { + "label": "Function", + "importPath": "openai.types.chat.chat_completion_message_tool_call", + "description": "openai.types.chat.chat_completion_message_tool_call", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_message_tool_call", + "documentation": {} + }, + { + "label": "graphviz", + "kind": 6, + "isExtraImport": true, + "importPath": "graphviz", + "description": "graphviz", + "detail": "graphviz", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents.handoffs", + "description": "agents.handoffs", + "isExtraImport": true, + "detail": "agents.handoffs", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents.handoffs", + "description": "agents.handoffs", + "isExtraImport": true, + "detail": "agents.handoffs", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents.handoffs", + "description": "agents.handoffs", + "isExtraImport": true, + "detail": "agents.handoffs", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents.handoffs", + "description": "agents.handoffs", + "isExtraImport": true, + "detail": "agents.handoffs", + "documentation": {} + }, + { + "label": "Handoff", + "importPath": "agents.handoffs", + "description": "agents.handoffs", + "isExtraImport": true, + "detail": "agents.handoffs", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "FunctionTool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "FunctionTool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents.tool", + "description": "agents.tool", + 
"isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "FunctionToolResult", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "ToolRunComputerAction", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "ComputerToolSafetyCheckData", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "default_tool_error_function", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "function_tool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "FileSearchTool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "WebSearchTool", + "importPath": "agents.tool", + "description": "agents.tool", + "isExtraImport": true, + "detail": "agents.tool", + "documentation": {} + }, + { + "label": "abc", + "kind": 6, + "isExtraImport": true, + "importPath": "abc", + "description": "abc", + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "abstractmethod", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "abstractmethod", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "inspect", + "kind": 6, + "isExtraImport": true, + "importPath": "inspect", + "description": "inspect", + "detail": "inspect", + "documentation": {} + }, + { + "label": "contextlib", + "kind": 6, + "isExtraImport": true, + "importPath": "contextlib", + "description": "contextlib", + "detail": "contextlib", + "documentation": {} + }, + { + "label": "AbstractAsyncContextManager", + "importPath": "contextlib", + "description": "contextlib", + "isExtraImport": true, + "detail": "contextlib", + "documentation": {} + }, + { + "label": "AsyncExitStack", + "importPath": "contextlib", + "description": "contextlib", + "isExtraImport": true, + "detail": "contextlib", + "documentation": {} + }, + { + "label": "timedelta", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + 
"detail": "datetime", + "documentation": {} + }, + { + "label": "timezone", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "MemoryObjectReceiveStream", + "importPath": "anyio.streams.memory", + "description": "anyio.streams.memory", + "isExtraImport": true, + "detail": "anyio.streams.memory", + "documentation": {} + }, + { + "label": "MemoryObjectSendStream", + "importPath": "anyio.streams.memory", + "description": "anyio.streams.memory", + "isExtraImport": true, + "detail": "anyio.streams.memory", + "documentation": {} + }, + { + "label": "ClientSession", + "importPath": "mcp", + "description": "mcp", + "isExtraImport": true, + "detail": "mcp", + "documentation": {} + }, + { + "label": "StdioServerParameters", + "importPath": "mcp", + "description": "mcp", + "isExtraImport": true, + "detail": "mcp", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "mcp", + "description": "mcp", + "isExtraImport": true, + "detail": "mcp", + "documentation": {} + }, + { + "label": "stdio_client", + "importPath": "mcp", + "description": "mcp", + "isExtraImport": true, + "detail": "mcp", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "mcp", + "description": "mcp", + "isExtraImport": true, + "detail": "mcp", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "mcp", + "description": "mcp", + "isExtraImport": true, + "detail": "mcp", + "documentation": {} + }, + { + "label": "sse_client", + "importPath": "mcp.client.sse", + "description": "mcp.client.sse", + "isExtraImport": true, + "detail": "mcp.client.sse", + "documentation": {} + }, + { + "label": "GetSessionIdCallback", + "importPath": "mcp.client.streamable_http", + "description": "mcp.client.streamable_http", + "isExtraImport": true, + "detail": "mcp.client.streamable_http", + "documentation": {} + }, + { + "label": "streamablehttp_client", + "importPath": "mcp.client.streamable_http", + "description": "mcp.client.streamable_http", + "isExtraImport": true, + "detail": "mcp.client.streamable_http", + "documentation": {} + }, + { + "label": "SessionMessage", + "importPath": "mcp.shared.message", + "description": "mcp.shared.message", + "isExtraImport": true, + "detail": "mcp.shared.message", + "documentation": {} + }, + { + "label": "CallToolResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "GetPromptResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "InitializeResult", + "importPath": "mcp.types", + "description": "mcp.types", + 
"isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "ListPromptsResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "CallToolResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "GetPromptResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "ListPromptsResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "PromptMessage", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "TextContent", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "ListToolsResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "ListToolsResult", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "Tool", + "importPath": "mcp.types", + "description": "mcp.types", + "isExtraImport": true, + "detail": "mcp.types", + "documentation": {} + }, + { + "label": "functools", + "kind": 6, + "isExtraImport": true, + "importPath": "functools", + "description": "functools", + "detail": "functools", + "documentation": {} + }, + { + "label": "cached_property", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "sqlite3", + "kind": 6, + "isExtraImport": true, + "importPath": "sqlite3", + "description": "sqlite3", + "detail": "sqlite3", + "documentation": {} + }, + { + "label": "threading", + "kind": 6, + "isExtraImport": true, + "importPath": "threading", + "description": "threading", + "detail": "threading", + "documentation": {} + }, + { + "label": "File", + "importPath": "openai.types.chat.chat_completion_content_part_param", + "description": "openai.types.chat.chat_completion_content_part_param", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_content_part_param", + "documentation": {} + }, + { + "label": "FileFile", + "importPath": "openai.types.chat.chat_completion_content_part_param", + "description": "openai.types.chat.chat_completion_content_part_param", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_content_part_param", + "documentation": {} + }, + { + "label": "ChatCompletionToolParam", + "importPath": "openai.types.chat.chat_completion_tool_param", + "description": "openai.types.chat.chat_completion_tool_param", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_tool_param", + "documentation": {} + }, + { + "label": "ResponseFormat", + "importPath": 
"openai.types.chat.completion_create_params", + "description": "openai.types.chat.completion_create_params", + "isExtraImport": true, + "detail": "openai.types.chat.completion_create_params", + "documentation": {} + }, + { + "label": "FunctionCallOutput", + "importPath": "openai.types.responses.response_input_param", + "description": "openai.types.responses.response_input_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_param", + "documentation": {} + }, + { + "label": "ItemReference", + "importPath": "openai.types.responses.response_input_param", + "description": "openai.types.responses.response_input_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_param", + "documentation": {} + }, + { + "label": "Message", + "importPath": "openai.types.responses.response_input_param", + "description": "openai.types.responses.response_input_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_param", + "documentation": {} + }, + { + "label": "ComputerCallOutput", + "importPath": "openai.types.responses.response_input_param", + "description": "openai.types.responses.response_input_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_param", + "documentation": {} + }, + { + "label": "McpApprovalResponse", + "importPath": "openai.types.responses.response_input_param", + "description": "openai.types.responses.response_input_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_param", + "documentation": {} + }, + { + "label": "Summary", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "Summary", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "ResponseReasoningItem", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "ResponseReasoningItem", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "ResponseReasoningItem", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "Summary", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "ResponseReasoningItem", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + 
"label": "Summary", + "importPath": "openai.types.responses.response_reasoning_item", + "description": "openai.types.responses.response_reasoning_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item", + "documentation": {} + }, + { + "label": "CompletionUsage", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionUsage", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionUsage", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "PromptTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionUsage", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "PromptTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionUsage", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "PromptTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "CompletionUsage", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "PromptTokensDetails", + "importPath": "openai.types.completion_usage", + "description": "openai.types.completion_usage", + "isExtraImport": true, + "detail": "openai.types.completion_usage", + "documentation": {} + }, + { + "label": "Part", + "importPath": "openai.types.responses.response_reasoning_summary_part_added_event", + "description": "openai.types.responses.response_reasoning_summary_part_added_event", + "isExtraImport": true, + "detail": 
"openai.types.responses.response_reasoning_summary_part_added_event", + "documentation": {} + }, + { + "label": "Part", + "importPath": "openai.types.responses.response_reasoning_summary_part_done_event", + "description": "openai.types.responses.response_reasoning_summary_part_done_event", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_summary_part_done_event", + "documentation": {} + }, + { + "label": "enum", + "kind": 6, + "isExtraImport": true, + "importPath": "enum", + "description": "enum", + "detail": "enum", + "documentation": {} + }, + { + "label": "Enum", + "importPath": "enum", + "description": "enum", + "isExtraImport": true, + "detail": "enum", + "documentation": {} + }, + { + "label": "ResponsePromptParam", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "ResponsePromptParam", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "ResponsePromptParam", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "ResponsePromptParam", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "ResponsePromptParam", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "Variables", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "ResponsePromptParam", + "importPath": "openai.types.responses.response_prompt_param", + "description": "openai.types.responses.response_prompt_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_prompt_param", + "documentation": {} + }, + { + "label": "ChatModel", + "importPath": "openai.types", + "description": "openai.types", + "isExtraImport": true, + "detail": "openai.types", + "documentation": {} + }, + { + "label": "ChatModel", + "importPath": "openai.types", + "description": "openai.types", + "isExtraImport": true, + "detail": "openai.types", + "documentation": {} + }, + { + "label": "Choice", + "importPath": "openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "ChatCompletion", + "importPath": "openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "Choice", + "importPath": 
"openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "ChatCompletion", + "importPath": "openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "Choice", + "importPath": "openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "ChatCompletion", + "importPath": "openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "Choice", + "importPath": "openai.types.chat.chat_completion", + "description": "openai.types.chat.chat_completion", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion", + "documentation": {} + }, + { + "label": "httpx", + "kind": 6, + "isExtraImport": true, + "importPath": "httpx", + "description": "httpx", + "detail": "httpx", + "documentation": {} + }, + { + "label": "ASGITransport", + "importPath": "httpx", + "description": "httpx", + "isExtraImport": true, + "detail": "httpx", + "documentation": {} + }, + { + "label": "AsyncClient", + "importPath": "httpx", + "description": "httpx", + "isExtraImport": true, + "detail": "httpx", + "documentation": {} + }, + { + "label": "websockets", + "kind": 6, + "isExtraImport": true, + "importPath": "websockets", + "description": "websockets", + "detail": "websockets", + "documentation": {} + }, + { + "label": "ConversationItem", + "importPath": "openai.types.beta.realtime.conversation_item", + "description": "openai.types.beta.realtime.conversation_item", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item", + "documentation": {} + }, + { + "label": "ConversationItem", + "importPath": "openai.types.beta.realtime.conversation_item", + "description": "openai.types.beta.realtime.conversation_item", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item", + "documentation": {} + }, + { + "label": "ConversationItem", + "importPath": "openai.types.beta.realtime.conversation_item", + "description": "openai.types.beta.realtime.conversation_item", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item", + "documentation": {} + }, + { + "label": "ConversationItem", + "importPath": "openai.types.beta.realtime.conversation_item", + "description": "openai.types.beta.realtime.conversation_item", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item", + "documentation": {} + }, + { + "label": "ConversationItemContent", + "importPath": "openai.types.beta.realtime.conversation_item_content", + "description": "openai.types.beta.realtime.conversation_item_content", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_content", + "documentation": {} + }, + { + "label": "ConversationItemContent", + "importPath": "openai.types.beta.realtime.conversation_item_content", + "description": "openai.types.beta.realtime.conversation_item_content", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_content", + "documentation": {} + }, + { + "label": 
"ConversationItemCreateEvent", + "importPath": "openai.types.beta.realtime.conversation_item_create_event", + "description": "openai.types.beta.realtime.conversation_item_create_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_create_event", + "documentation": {} + }, + { + "label": "ConversationItemCreateEvent", + "importPath": "openai.types.beta.realtime.conversation_item_create_event", + "description": "openai.types.beta.realtime.conversation_item_create_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_create_event", + "documentation": {} + }, + { + "label": "ConversationItemRetrieveEvent", + "importPath": "openai.types.beta.realtime.conversation_item_retrieve_event", + "description": "openai.types.beta.realtime.conversation_item_retrieve_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_retrieve_event", + "documentation": {} + }, + { + "label": "ConversationItemTruncateEvent", + "importPath": "openai.types.beta.realtime.conversation_item_truncate_event", + "description": "openai.types.beta.realtime.conversation_item_truncate_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_truncate_event", + "documentation": {} + }, + { + "label": "ConversationItemTruncateEvent", + "importPath": "openai.types.beta.realtime.conversation_item_truncate_event", + "description": "openai.types.beta.realtime.conversation_item_truncate_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.conversation_item_truncate_event", + "documentation": {} + }, + { + "label": "InputAudioBufferAppendEvent", + "importPath": "openai.types.beta.realtime.input_audio_buffer_append_event", + "description": "openai.types.beta.realtime.input_audio_buffer_append_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.input_audio_buffer_append_event", + "documentation": {} + }, + { + "label": "InputAudioBufferAppendEvent", + "importPath": "openai.types.beta.realtime.input_audio_buffer_append_event", + "description": "openai.types.beta.realtime.input_audio_buffer_append_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.input_audio_buffer_append_event", + "documentation": {} + }, + { + "label": "InputAudioBufferCommitEvent", + "importPath": "openai.types.beta.realtime.input_audio_buffer_commit_event", + "description": "openai.types.beta.realtime.input_audio_buffer_commit_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.input_audio_buffer_commit_event", + "documentation": {} + }, + { + "label": "RealtimeClientEvent", + "importPath": "openai.types.beta.realtime.realtime_client_event", + "description": "openai.types.beta.realtime.realtime_client_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.realtime_client_event", + "documentation": {} + }, + { + "label": "RealtimeServerEvent", + "importPath": "openai.types.beta.realtime.realtime_server_event", + "description": "openai.types.beta.realtime.realtime_server_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.realtime_server_event", + "documentation": {} + }, + { + "label": "ResponseAudioDeltaEvent", + "importPath": "openai.types.beta.realtime.response_audio_delta_event", + "description": "openai.types.beta.realtime.response_audio_delta_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.response_audio_delta_event", + "documentation": {} + }, + { + "label": 
"ResponseCancelEvent", + "importPath": "openai.types.beta.realtime.response_cancel_event", + "description": "openai.types.beta.realtime.response_cancel_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.response_cancel_event", + "documentation": {} + }, + { + "label": "ResponseCreateEvent", + "importPath": "openai.types.beta.realtime.response_create_event", + "description": "openai.types.beta.realtime.response_create_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.response_create_event", + "documentation": {} + }, + { + "label": "Session", + "importPath": "openai.types.beta.realtime.session_update_event", + "description": "openai.types.beta.realtime.session_update_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.session_update_event", + "documentation": {} + }, + { + "label": "SessionTool", + "importPath": "openai.types.beta.realtime.session_update_event", + "description": "openai.types.beta.realtime.session_update_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.session_update_event", + "documentation": {} + }, + { + "label": "SessionTracing", + "importPath": "openai.types.beta.realtime.session_update_event", + "description": "openai.types.beta.realtime.session_update_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.session_update_event", + "documentation": {} + }, + { + "label": "SessionTracingTracingConfiguration", + "importPath": "openai.types.beta.realtime.session_update_event", + "description": "openai.types.beta.realtime.session_update_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.session_update_event", + "documentation": {} + }, + { + "label": "SessionUpdateEvent", + "importPath": "openai.types.beta.realtime.session_update_event", + "description": "openai.types.beta.realtime.session_update_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.session_update_event", + "documentation": {} + }, + { + "label": "SessionTracingTracingConfiguration", + "importPath": "openai.types.beta.realtime.session_update_event", + "description": "openai.types.beta.realtime.session_update_event", + "isExtraImport": true, + "detail": "openai.types.beta.realtime.session_update_event", + "documentation": {} + }, + { + "label": "ClientConnection", + "importPath": "websockets.asyncio.client", + "description": "websockets.asyncio.client", + "isExtraImport": true, + "detail": "websockets.asyncio.client", + "documentation": {} + }, + { + "label": "MaybeAwaitable", + "importPath": "agents.util._types", + "description": "agents.util._types", + "isExtraImport": true, + "detail": "agents.util._types", + "documentation": {} + }, + { + "label": "MaybeAwaitable", + "importPath": "agents.util._types", + "description": "agents.util._types", + "isExtraImport": true, + "detail": "agents.util._types", + "documentation": {} + }, + { + "label": "logging", + "kind": 6, + "isExtraImport": true, + "importPath": "logging", + "description": "logging", + "detail": "logging", + "documentation": {} + }, + { + "label": "queue", + "kind": 6, + "isExtraImport": true, + "importPath": "queue", + "description": "queue", + "detail": "queue", + "documentation": {} + }, + { + "label": "contextvars", + "kind": 6, + "isExtraImport": true, + "importPath": "contextvars", + "description": "contextvars", + "detail": "contextvars", + "documentation": {} + }, + { + "label": "re", + "kind": 6, + "isExtraImport": true, + "importPath": "re", + "description": "re", + "detail": "re", + 
"documentation": {} + }, + { + "label": "io", + "kind": 6, + "isExtraImport": true, + "importPath": "io", + "description": "io", + "detail": "io", + "documentation": {} + }, + { + "label": "wave", + "kind": 6, + "isExtraImport": true, + "importPath": "wave", + "description": "wave", + "detail": "wave", + "documentation": {} + }, + { + "label": "ResponseCodeInterpreterToolCall", + "importPath": "openai.types.responses.response_code_interpreter_tool_call", + "description": "openai.types.responses.response_code_interpreter_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_code_interpreter_tool_call", + "documentation": {} + }, + { + "label": "ResponseCodeInterpreterToolCall", + "importPath": "openai.types.responses.response_code_interpreter_tool_call", + "description": "openai.types.responses.response_code_interpreter_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_code_interpreter_tool_call", + "documentation": {} + }, + { + "label": "ActionClick", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionDoubleClick", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionDrag", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionKeypress", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionMove", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionScreenshot", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionScroll", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionType", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionWait", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": 
"openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "PendingSafetyCheck", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ResponseComputerToolCall", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionClick", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionDoubleClick", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionDrag", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionDragPath", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionKeypress", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionMove", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionScreenshot", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionScroll", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionType", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionWait", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": 
"openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "PendingSafetyCheck", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ResponseComputerToolCall", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionScreenshot", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ResponseComputerToolCall", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ActionClick", + "importPath": "openai.types.responses.response_computer_tool_call", + "description": "openai.types.responses.response_computer_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call", + "documentation": {} + }, + { + "label": "ComputerCallOutputAcknowledgedSafetyCheck", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "ComputerCallOutput", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "FunctionCallOutput", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "LocalShellCallOutput", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "McpApprovalResponse", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "FunctionCallOutput", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "FunctionCallOutput", + "importPath": "openai.types.responses.response_input_item_param", + "description": "openai.types.responses.response_input_item_param", + "isExtraImport": true, + "detail": 
"openai.types.responses.response_input_item_param", + "documentation": {} + }, + { + "label": "ImageGenerationCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "LocalShellCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpApprovalRequest", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpListTools", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "ImageGenerationCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "LocalShellCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpApprovalRequest", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpListTools", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "LocalShellCall", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "McpApprovalRequest", + "importPath": "openai.types.responses.response_output_item", + "description": "openai.types.responses.response_output_item", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_item", + "documentation": {} + }, + { + "label": "Docstring", + "importPath": "griffe", + "description": "griffe", + "isExtraImport": true, + "detail": "griffe", + "documentation": {} + }, + { + "label": "DocstringSectionKind", + "importPath": "griffe", 
+ "description": "griffe", + "isExtraImport": true, + "detail": "griffe", + "documentation": {} + }, + { + "label": "FieldInfo", + "importPath": "pydantic.fields", + "description": "pydantic.fields", + "isExtraImport": true, + "detail": "pydantic.fields", + "documentation": {} + }, + { + "label": "copy", + "kind": 6, + "isExtraImport": true, + "importPath": "copy", + "description": "copy", + "detail": "copy", + "documentation": {} + }, + { + "label": "Body", + "importPath": "openai._types", + "description": "openai._types", + "isExtraImport": true, + "detail": "openai._types", + "documentation": {} + }, + { + "label": "Query", + "importPath": "openai._types", + "description": "openai._types", + "isExtraImport": true, + "detail": "openai._types", + "documentation": {} + }, + { + "label": "Reasoning", + "importPath": "openai.types.shared", + "description": "openai.types.shared", + "isExtraImport": true, + "detail": "openai.types.shared", + "documentation": {} + }, + { + "label": "Reasoning", + "importPath": "openai.types.shared", + "description": "openai.types.shared", + "isExtraImport": true, + "detail": "openai.types.shared", + "documentation": {} + }, + { + "label": "core_schema", + "importPath": "pydantic_core", + "description": "pydantic_core", + "isExtraImport": true, + "detail": "pydantic_core", + "documentation": {} + }, + { + "label": "to_json", + "importPath": "pydantic_core", + "description": "pydantic_core", + "isExtraImport": true, + "detail": "pydantic_core", + "documentation": {} + }, + { + "label": "ResponseTextDeltaEvent", + "importPath": "openai.types.responses.response_text_delta_event", + "description": "openai.types.responses.response_text_delta_event", + "isExtraImport": true, + "detail": "openai.types.responses.response_text_delta_event", + "documentation": {} + }, + { + "label": "ResponseTextDeltaEvent", + "importPath": "openai.types.responses.response_text_delta_event", + "description": "openai.types.responses.response_text_delta_event", + "isExtraImport": true, + "detail": "openai.types.responses.response_text_delta_event", + "documentation": {} + }, + { + "label": "Filters", + "importPath": "openai.types.responses.file_search_tool_param", + "description": "openai.types.responses.file_search_tool_param", + "isExtraImport": true, + "detail": "openai.types.responses.file_search_tool_param", + "documentation": {} + }, + { + "label": "RankingOptions", + "importPath": "openai.types.responses.file_search_tool_param", + "description": "openai.types.responses.file_search_tool_param", + "isExtraImport": true, + "detail": "openai.types.responses.file_search_tool_param", + "documentation": {} + }, + { + "label": "CodeInterpreter", + "importPath": "openai.types.responses.tool_param", + "description": "openai.types.responses.tool_param", + "isExtraImport": true, + "detail": "openai.types.responses.tool_param", + "documentation": {} + }, + { + "label": "ImageGeneration", + "importPath": "openai.types.responses.tool_param", + "description": "openai.types.responses.tool_param", + "isExtraImport": true, + "detail": "openai.types.responses.tool_param", + "documentation": {} + }, + { + "label": "Mcp", + "importPath": "openai.types.responses.tool_param", + "description": "openai.types.responses.tool_param", + "isExtraImport": true, + "detail": "openai.types.responses.tool_param", + "documentation": {} + }, + { + "label": "UserLocation", + "importPath": "openai.types.responses.web_search_tool_param", + "description": "openai.types.responses.web_search_tool_param", + "isExtraImport": 
true, + "detail": "openai.types.responses.web_search_tool_param", + "documentation": {} + }, + { + "label": "importlib.metadata", + "kind": 6, + "isExtraImport": true, + "importPath": "importlib.metadata", + "description": "importlib.metadata", + "detail": "importlib.metadata", + "documentation": {} + }, + { + "label": "FastAPI", + "importPath": "fastapi", + "description": "fastapi", + "isExtraImport": true, + "detail": "fastapi", + "documentation": {} + }, + { + "label": "StreamingResponse", + "importPath": "starlette.responses", + "description": "starlette.responses", + "isExtraImport": true, + "detail": "starlette.responses", + "documentation": {} + }, + { + "label": "pytest", + "kind": 6, + "isExtraImport": true, + "importPath": "pytest", + "description": "pytest", + "detail": "pytest", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "snapshot", + "importPath": "inline_snapshot", + "description": "inline_snapshot", + "isExtraImport": true, + "detail": "inline_snapshot", + "documentation": {} + }, + { + "label": "_MCPServerWithClientSession", + "importPath": "agents.mcp.server", + "description": "agents.mcp.server", + "isExtraImport": true, + "detail": "agents.mcp.server", + "documentation": {} + }, + { + "label": "_MCPServerWithClientSession", + "importPath": "agents.mcp.server", + "description": "agents.mcp.server", + "isExtraImport": true, + "detail": "agents.mcp.server", + "documentation": {} + }, + { + "label": "ToolFilter", + "importPath": "agents.mcp.util", + "description": "agents.mcp.util", + "isExtraImport": true, + "detail": 
"agents.mcp.util", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "Mock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "Mock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "Mock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "Mock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "AsyncMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "Mock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "MagicMock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "patch", + 
"importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "Mock", + "importPath": "unittest.mock", + "description": "unittest.mock", + "isExtraImport": true, + "detail": "unittest.mock", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents.run_context", + "description": "agents.run_context", + "isExtraImport": true, + "detail": "agents.run_context", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents.run_context", + "description": "agents.run_context", + "isExtraImport": true, + "detail": "agents.run_context", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents.run_context", + "description": "agents.run_context", + "isExtraImport": true, + "detail": "agents.run_context", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents.run_context", + "description": "agents.run_context", + "isExtraImport": true, + "detail": "agents.run_context", + "documentation": {} + }, + { + "label": "TContext", + "importPath": "agents.run_context", + "description": "agents.run_context", + "isExtraImport": true, + "detail": "agents.run_context", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "importPath": "agents.run_context", + "description": "agents.run_context", + "isExtraImport": true, + "detail": "agents.run_context", + "documentation": {} + }, + { + "label": "litellm", + "kind": 6, + "isExtraImport": true, + "importPath": "litellm", + "description": "litellm", + "detail": "litellm", + "documentation": {} + }, + { + "label": "Choices", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "Message", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "Choices", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "Message", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "ModelResponse", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "litellm.types.utils", + "description": "litellm.types.utils", + "isExtraImport": true, + "detail": "litellm.types.utils", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": "agents.models.openai_chatcompletions", + "description": "agents.models.openai_chatcompletions", + "isExtraImport": true, + "detail": "agents.models.openai_chatcompletions", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": 
"agents.models.openai_chatcompletions", + "description": "agents.models.openai_chatcompletions", + "isExtraImport": true, + "detail": "agents.models.openai_chatcompletions", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": "agents.models.openai_chatcompletions", + "description": "agents.models.openai_chatcompletions", + "isExtraImport": true, + "detail": "agents.models.openai_chatcompletions", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": "agents.models.openai_chatcompletions", + "description": "agents.models.openai_chatcompletions", + "isExtraImport": true, + "detail": "agents.models.openai_chatcompletions", + "documentation": {} + }, + { + "label": "OpenAIChatCompletionsModel", + "importPath": "agents.models.openai_chatcompletions", + "description": "agents.models.openai_chatcompletions", + "isExtraImport": true, + "detail": "agents.models.openai_chatcompletions", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "Choice", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDelta", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDeltaToolCall", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDeltaToolCallFunction", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChatCompletionChunk", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "Choice", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDelta", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDeltaToolCall", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": 
"openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDeltaToolCallFunction", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "Choice", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "ChoiceDelta", + "importPath": "openai.types.chat.chat_completion_chunk", + "description": "openai.types.chat.chat_completion_chunk", + "isExtraImport": true, + "detail": "openai.types.chat.chat_completion_chunk", + "documentation": {} + }, + { + "label": "LitellmProvider", + "importPath": "agents.extensions.models.litellm_provider", + "description": "agents.extensions.models.litellm_provider", + "isExtraImport": true, + "detail": "agents.extensions.models.litellm_provider", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "set_default_agent_runner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "Runner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "RunConfig", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "set_default_agent_runner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "importPath": "agents.run", + "description": "agents.run", + "isExtraImport": true, + "detail": "agents.run", + "documentation": {} + }, + { + "label": "RealtimeAgent", + "importPath": "agents.realtime.agent", + "description": "agents.realtime.agent", + "isExtraImport": true, + "detail": "agents.realtime.agent", + "documentation": {} + }, + { + "label": "RealtimeAgent", + "importPath": "agents.realtime.agent", + "description": "agents.realtime.agent", + "isExtraImport": true, + "detail": "agents.realtime.agent", + "documentation": {} + }, + { + "label": "RealtimeAgent", + "importPath": "agents.realtime.agent", + "description": "agents.realtime.agent", + "isExtraImport": true, + "detail": 
"agents.realtime.agent", + "documentation": {} + }, + { + "label": "RealtimeModelTracingConfig", + "importPath": "agents.realtime.config", + "description": "agents.realtime.config", + "isExtraImport": true, + "detail": "agents.realtime.config", + "documentation": {} + }, + { + "label": "RealtimeRunConfig", + "importPath": "agents.realtime.config", + "description": "agents.realtime.config", + "isExtraImport": true, + "detail": "agents.realtime.config", + "documentation": {} + }, + { + "label": "RealtimeSessionModelSettings", + "importPath": "agents.realtime.config", + "description": "agents.realtime.config", + "isExtraImport": true, + "detail": "agents.realtime.config", + "documentation": {} + }, + { + "label": "RealtimeRunConfig", + "importPath": "agents.realtime.config", + "description": "agents.realtime.config", + "isExtraImport": true, + "detail": "agents.realtime.config", + "documentation": {} + }, + { + "label": "RealtimeModelSendAudio", + "importPath": "agents.realtime.model_inputs", + "description": "agents.realtime.model_inputs", + "isExtraImport": true, + "detail": "agents.realtime.model_inputs", + "documentation": {} + }, + { + "label": "RealtimeModelSendRawMessage", + "importPath": "agents.realtime.model_inputs", + "description": "agents.realtime.model_inputs", + "isExtraImport": true, + "detail": "agents.realtime.model_inputs", + "documentation": {} + }, + { + "label": "RealtimeModelSendToolOutput", + "importPath": "agents.realtime.model_inputs", + "description": "agents.realtime.model_inputs", + "isExtraImport": true, + "detail": "agents.realtime.model_inputs", + "documentation": {} + }, + { + "label": "RealtimeModelSendUserInput", + "importPath": "agents.realtime.model_inputs", + "description": "agents.realtime.model_inputs", + "isExtraImport": true, + "detail": "agents.realtime.model_inputs", + "documentation": {} + }, + { + "label": "RealtimeModelUserInputMessage", + "importPath": "agents.realtime.model_inputs", + "description": "agents.realtime.model_inputs", + "isExtraImport": true, + "detail": "agents.realtime.model_inputs", + "documentation": {} + }, + { + "label": "_ConversionHelper", + "importPath": "agents.realtime.openai_realtime", + "description": "agents.realtime.openai_realtime", + "isExtraImport": true, + "detail": "agents.realtime.openai_realtime", + "documentation": {} + }, + { + "label": "_ConversionHelper", + "importPath": "agents.realtime.openai_realtime", + "description": "agents.realtime.openai_realtime", + "isExtraImport": true, + "detail": "agents.realtime.openai_realtime", + "documentation": {} + }, + { + "label": "OpenAIRealtimeWebSocketModel", + "importPath": "agents.realtime.openai_realtime", + "description": "agents.realtime.openai_realtime", + "isExtraImport": true, + "detail": "agents.realtime.openai_realtime", + "documentation": {} + }, + { + "label": "OpenAIRealtimeWebSocketModel", + "importPath": "agents.realtime.openai_realtime", + "description": "agents.realtime.openai_realtime", + "isExtraImport": true, + "detail": "agents.realtime.openai_realtime", + "documentation": {} + }, + { + "label": "AssistantMessageItem", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "RealtimeMessageItem", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "SystemMessageItem", + "importPath": 
"agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "UserMessageItem", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "AssistantMessageItem", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "AssistantText", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "InputAudio", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "InputText", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "RealtimeItem", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "UserMessageItem", + "importPath": "agents.realtime.items", + "description": "agents.realtime.items", + "isExtraImport": true, + "detail": "agents.realtime.items", + "documentation": {} + }, + { + "label": "RealtimeModelEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelAudioEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelErrorEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelToolCallEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelAudioDoneEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelAudioEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelAudioInterruptedEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelConnectionStatusEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelErrorEvent", + "importPath": 
"agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelInputAudioTranscriptionCompletedEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelItemDeletedEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelItemUpdatedEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelOtherEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelToolCallEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelTranscriptDeltaEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelTurnEndedEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelTurnStartedEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelErrorEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModelExceptionEvent", + "importPath": "agents.realtime.model_events", + "description": "agents.realtime.model_events", + "isExtraImport": true, + "detail": "agents.realtime.model_events", + "documentation": {} + }, + { + "label": "RealtimeModel", + "importPath": "agents.realtime.model", + "description": "agents.realtime.model", + "isExtraImport": true, + "detail": "agents.realtime.model", + "documentation": {} + }, + { + "label": "RealtimeModelConfig", + "importPath": "agents.realtime.model", + "description": "agents.realtime.model", + "isExtraImport": true, + "detail": "agents.realtime.model", + "documentation": {} + }, + { + "label": "RealtimeModel", + "importPath": "agents.realtime.model", + "description": "agents.realtime.model", + "isExtraImport": true, + "detail": "agents.realtime.model", + "documentation": {} + }, + { + "label": "RealtimeModel", + "importPath": "agents.realtime.model", + "description": 
"agents.realtime.model", + "isExtraImport": true, + "detail": "agents.realtime.model", + "documentation": {} + }, + { + "label": "RealtimeModelConfig", + "importPath": "agents.realtime.model", + "description": "agents.realtime.model", + "isExtraImport": true, + "detail": "agents.realtime.model", + "documentation": {} + }, + { + "label": "RealtimeModelListener", + "importPath": "agents.realtime.model", + "description": "agents.realtime.model", + "isExtraImport": true, + "detail": "agents.realtime.model", + "documentation": {} + }, + { + "label": "RealtimeRunner", + "importPath": "agents.realtime.runner", + "description": "agents.realtime.runner", + "isExtraImport": true, + "detail": "agents.realtime.runner", + "documentation": {} + }, + { + "label": "RealtimeSession", + "importPath": "agents.realtime.session", + "description": "agents.realtime.session", + "isExtraImport": true, + "detail": "agents.realtime.session", + "documentation": {} + }, + { + "label": "RealtimeSession", + "importPath": "agents.realtime.session", + "description": "agents.realtime.session", + "isExtraImport": true, + "detail": "agents.realtime.session", + "documentation": {} + }, + { + "label": "RealtimeSession", + "importPath": "agents.realtime.session", + "description": "agents.realtime.session", + "isExtraImport": true, + "detail": "agents.realtime.session", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "importPath": "agents.guardrail", + "description": "agents.guardrail", + "isExtraImport": true, + "detail": "agents.guardrail", + "documentation": {} + }, + { + "label": "OutputGuardrail", + "importPath": "agents.guardrail", + "description": "agents.guardrail", + "isExtraImport": true, + "detail": "agents.guardrail", + "documentation": {} + }, + { + "label": "input_guardrail", + "importPath": "agents.guardrail", + "description": "agents.guardrail", + "isExtraImport": true, + "detail": "agents.guardrail", + "documentation": {} + }, + { + "label": "output_guardrail", + "importPath": "agents.guardrail", + "description": "agents.guardrail", + "isExtraImport": true, + "detail": "agents.guardrail", + "documentation": {} + }, + { + "label": "RealtimeAgentEndEvent", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeAgentStartEvent", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeAudio", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeAudioEnd", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeAudioInterrupted", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeError", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeGuardrailTripped", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", 
+ "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeHistoryAdded", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeHistoryUpdated", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeRawModelEvent", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeToolEnd", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeToolStart", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "RealtimeError", + "importPath": "agents.realtime.events", + "description": "agents.realtime.events", + "isExtraImport": true, + "detail": "agents.realtime.events", + "documentation": {} + }, + { + "label": "ToolContext", + "importPath": "agents.tool_context", + "description": "agents.tool_context", + "isExtraImport": true, + "detail": "agents.tool_context", + "documentation": {} + }, + { + "label": "ToolContext", + "importPath": "agents.tool_context", + "description": "agents.tool_context", + "isExtraImport": true, + "detail": "agents.tool_context", + "documentation": {} + }, + { + "label": "ToolContext", + "importPath": "agents.tool_context", + "description": "agents.tool_context", + "isExtraImport": true, + "detail": "agents.tool_context", + "documentation": {} + }, + { + "label": "ToolContext", + "importPath": "agents.tool_context", + "description": "agents.tool_context", + "isExtraImport": true, + "detail": "agents.tool_context", + "documentation": {} + }, + { + "label": "BackendSpanExporter", + "importPath": "agents.tracing.processors", + "description": "agents.tracing.processors", + "isExtraImport": true, + "detail": "agents.tracing.processors", + "documentation": {} + }, + { + "label": "BackendSpanExporter", + "importPath": "agents.tracing.processors", + "description": "agents.tracing.processors", + "isExtraImport": true, + "detail": "agents.tracing.processors", + "documentation": {} + }, + { + "label": "BatchTraceProcessor", + "importPath": "agents.tracing.processors", + "description": "agents.tracing.processors", + "isExtraImport": true, + "detail": "agents.tracing.processors", + "documentation": {} + }, + { + "label": "SimpleNamespace", + "importPath": "types", + "description": "types", + "isExtraImport": true, + "detail": "types", + "documentation": {} + }, + { + "label": "AgentOutputSchemaBase", + "importPath": "agents.agent_output", + "description": "agents.agent_output", + "isExtraImport": true, + "detail": "agents.agent_output", + "documentation": {} + }, + { + "label": "AgentOutputSchemaBase", + "importPath": "agents.agent_output", + "description": "agents.agent_output", + "isExtraImport": true, + "detail": "agents.agent_output", + "documentation": {} + }, + { + "label": "AgentOutputSchema", + "importPath": "agents.agent_output", + "description": "agents.agent_output", + "isExtraImport": true, + "detail": 
"agents.agent_output", + "documentation": {} + }, + { + "label": "_WRAPPER_DICT_KEY", + "importPath": "agents.agent_output", + "description": "agents.agent_output", + "isExtraImport": true, + "detail": "agents.agent_output", + "documentation": {} + }, + { + "label": "_WRAPPER_DICT_KEY", + "importPath": "agents.agent_output", + "description": "agents.agent_output", + "isExtraImport": true, + "detail": "agents.agent_output", + "documentation": {} + }, + { + "label": "_openai_shared", + "importPath": "agents.models", + "description": "agents.models", + "isExtraImport": true, + "detail": "agents.models", + "documentation": {} + }, + { + "label": "OpenAIResponsesModel", + "importPath": "agents.models.openai_responses", + "description": "agents.models.openai_responses", + "isExtraImport": true, + "detail": "agents.models.openai_responses", + "documentation": {} + }, + { + "label": "OpenAIResponsesModel", + "importPath": "agents.models.openai_responses", + "description": "agents.models.openai_responses", + "isExtraImport": true, + "detail": "agents.models.openai_responses", + "documentation": {} + }, + { + "label": "Converter", + "importPath": "agents.models.openai_responses", + "description": "agents.models.openai_responses", + "isExtraImport": true, + "detail": "agents.models.openai_responses", + "documentation": {} + }, + { + "label": "set_trace_processors", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "SpanError", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "generation_span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "Span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "Trace", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "agent_span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "custom_span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "function_span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "generation_span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "handoff_span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "trace", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "Span", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "Trace", + "importPath": 
"agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "TracingProcessor", + "importPath": "agents.tracing", + "description": "agents.tracing", + "isExtraImport": true, + "detail": "agents.tracing", + "documentation": {} + }, + { + "label": "get_trace_provider", + "importPath": "agents.tracing.setup", + "description": "agents.tracing.setup", + "isExtraImport": true, + "detail": "agents.tracing.setup", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "agents.usage", + "description": "agents.usage", + "isExtraImport": true, + "detail": "agents.usage", + "documentation": {} + }, + { + "label": "Usage", + "importPath": "agents.usage", + "description": "agents.usage", + "isExtraImport": true, + "detail": "agents.usage", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "Agent", + "importPath": "agents.agent", + "description": "agents.agent", + "isExtraImport": true, + "detail": "agents.agent", + "documentation": {} + }, + { + "label": "Action", + "importPath": "agents.agent", + "description": "agents.agent", + "isExtraImport": true, + "detail": "agents.agent", + "documentation": {} + }, + { + "label": "ToolsToFinalOutputResult", + "importPath": "agents.agent", + "description": "agents.agent", + "isExtraImport": true, + "detail": "agents.agent", + "documentation": {} + }, + { + "label": "AgentHooks", + "importPath": "agents.lifecycle", + "description": "agents.lifecycle", + "isExtraImport": true, + "detail": "agents.lifecycle", + "documentation": {} + }, + { + "label": "AgentUpdatedStreamEvent", + "importPath": "agents.stream_events", + "description": "agents.stream_events", + "isExtraImport": true, + "detail": "agents.stream_events", + "documentation": {} + }, + { + "label": "ComputerAction", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "RunImpl", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "NextStepFinalOutput", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "NextStepHandoff", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "NextStepRunAgain", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "RunImpl", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "SingleStepResult", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "RunImpl", + "importPath": "agents._run_impl", + "description": 
"agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "AgentToolUseTracker", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "RunImpl", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "RunImpl", + "importPath": "agents._run_impl", + "description": "agents._run_impl", + "isExtraImport": true, + "detail": "agents._run_impl", + "documentation": {} + }, + { + "label": "generate_func_documentation", + "importPath": "agents.function_schema", + "description": "agents.function_schema", + "isExtraImport": true, + "detail": "agents.function_schema", + "documentation": {} + }, + { + "label": "function_schema", + "importPath": "agents.function_schema", + "description": "agents.function_schema", + "isExtraImport": true, + "detail": "agents.function_schema", + "documentation": {} + }, + { + "label": "remove_all_tools", + "importPath": "agents.extensions.handoff_filters", + "description": "agents.extensions.handoff_filters", + "isExtraImport": true, + "detail": "agents.extensions.handoff_filters", + "documentation": {} + }, + { + "label": "ResponseComputerToolCallParam", + "importPath": "openai.types.responses.response_computer_tool_call_param", + "description": "openai.types.responses.response_computer_tool_call_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_computer_tool_call_param", + "documentation": {} + }, + { + "label": "ResponseFileSearchToolCall", + "importPath": "openai.types.responses.response_file_search_tool_call", + "description": "openai.types.responses.response_file_search_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_file_search_tool_call", + "documentation": {} + }, + { + "label": "ResponseFileSearchToolCallParam", + "importPath": "openai.types.responses.response_file_search_tool_call_param", + "description": "openai.types.responses.response_file_search_tool_call_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_file_search_tool_call_param", + "documentation": {} + }, + { + "label": "ResponseFunctionToolCall", + "importPath": "openai.types.responses.response_function_tool_call", + "description": "openai.types.responses.response_function_tool_call", + "isExtraImport": true, + "detail": "openai.types.responses.response_function_tool_call", + "documentation": {} + }, + { + "label": "ResponseFunctionToolCallParam", + "importPath": "openai.types.responses.response_function_tool_call_param", + "description": "openai.types.responses.response_function_tool_call_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_function_tool_call_param", + "documentation": {} + }, + { + "label": "ActionSearch", + "importPath": "openai.types.responses.response_function_web_search", + "description": "openai.types.responses.response_function_web_search", + "isExtraImport": true, + "detail": "openai.types.responses.response_function_web_search", + "documentation": {} + }, + { + "label": "ResponseFunctionWebSearch", + "importPath": "openai.types.responses.response_function_web_search", + "description": "openai.types.responses.response_function_web_search", + "isExtraImport": true, + "detail": "openai.types.responses.response_function_web_search", + "documentation": {} + }, + { + "label": 
"ActionSearch", + "importPath": "openai.types.responses.response_function_web_search", + "description": "openai.types.responses.response_function_web_search", + "isExtraImport": true, + "detail": "openai.types.responses.response_function_web_search", + "documentation": {} + }, + { + "label": "ResponseFunctionWebSearchParam", + "importPath": "openai.types.responses.response_function_web_search_param", + "description": "openai.types.responses.response_function_web_search_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_function_web_search_param", + "documentation": {} + }, + { + "label": "ResponseOutputMessage", + "importPath": "openai.types.responses.response_output_message", + "description": "openai.types.responses.response_output_message", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_message", + "documentation": {} + }, + { + "label": "ResponseOutputMessageParam", + "importPath": "openai.types.responses.response_output_message_param", + "description": "openai.types.responses.response_output_message_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_message_param", + "documentation": {} + }, + { + "label": "ResponseOutputRefusal", + "importPath": "openai.types.responses.response_output_refusal", + "description": "openai.types.responses.response_output_refusal", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_refusal", + "documentation": {} + }, + { + "label": "ResponseOutputText", + "importPath": "openai.types.responses.response_output_text", + "description": "openai.types.responses.response_output_text", + "isExtraImport": true, + "detail": "openai.types.responses.response_output_text", + "documentation": {} + }, + { + "label": "ResponseReasoningItemParam", + "importPath": "openai.types.responses.response_reasoning_item_param", + "description": "openai.types.responses.response_reasoning_item_param", + "isExtraImport": true, + "detail": "openai.types.responses.response_reasoning_item_param", + "documentation": {} + }, + { + "label": "ChatCmplHelpers", + "importPath": "agents.models.chatcmpl_helpers", + "description": "agents.models.chatcmpl_helpers", + "isExtraImport": true, + "detail": "agents.models.chatcmpl_helpers", + "documentation": {} + }, + { + "label": "FAKE_RESPONSES_ID", + "importPath": "agents.models.fake_id", + "description": "agents.models.fake_id", + "isExtraImport": true, + "detail": "agents.models.fake_id", + "documentation": {} + }, + { + "label": "FAKE_RESPONSES_ID", + "importPath": "agents.models.fake_id", + "description": "agents.models.fake_id", + "isExtraImport": true, + "detail": "agents.models.fake_id", + "documentation": {} + }, + { + "label": "Converter", + "importPath": "agents.models.chatcmpl_converter", + "description": "agents.models.chatcmpl_converter", + "isExtraImport": true, + "detail": "agents.models.chatcmpl_converter", + "documentation": {} + }, + { + "label": "Converter", + "importPath": "agents.models.chatcmpl_converter", + "description": "agents.models.chatcmpl_converter", + "isExtraImport": true, + "detail": "agents.models.chatcmpl_converter", + "documentation": {} + }, + { + "label": "_json", + "importPath": "agents.util", + "description": "agents.util", + "isExtraImport": true, + "detail": "agents.util", + "documentation": {} + }, + { + "label": "pretty_print_result", + "importPath": "agents.util._pretty_print", + "description": "agents.util._pretty_print", + "isExtraImport": true, + "detail": "agents.util._pretty_print", + 
"documentation": {} + }, + { + "label": "pretty_print_run_result_streaming", + "importPath": "agents.util._pretty_print", + "description": "agents.util._pretty_print", + "isExtraImport": true, + "detail": "agents.util._pretty_print", + "documentation": {} + }, + { + "label": "FakeModel", + "importPath": "tests.fake_model", + "description": "tests.fake_model", + "isExtraImport": true, + "detail": "tests.fake_model", + "documentation": {} + }, + { + "label": "ResponseSpanData", + "importPath": "agents.tracing.span_data", + "description": "agents.tracing.span_data", + "isExtraImport": true, + "detail": "agents.tracing.span_data", + "documentation": {} + }, + { + "label": "AgentSpanData", + "importPath": "agents.tracing.span_data", + "description": "agents.tracing.span_data", + "isExtraImport": true, + "detail": "agents.tracing.span_data", + "documentation": {} + }, + { + "label": "fake_model", + "importPath": "tests", + "description": "tests", + "isExtraImport": true, + "detail": "tests", + "documentation": {} + }, + { + "label": "mock", + "importPath": "unittest", + "description": "unittest", + "isExtraImport": true, + "detail": "unittest", + "documentation": {} + }, + { + "label": "websockets.exceptions", + "kind": 6, + "isExtraImport": true, + "importPath": "websockets.exceptions", + "description": "websockets.exceptions", + "detail": "websockets.exceptions", + "documentation": {} + }, + { + "label": "ensure_strict_json_schema", + "importPath": "agents.strict_schema", + "description": "agents.strict_schema", + "isExtraImport": true, + "detail": "agents.strict_schema", + "documentation": {} + }, + { + "label": "TracingProcessor", + "importPath": "agents.tracing.processor_interface", + "description": "agents.tracing.processor_interface", + "isExtraImport": true, + "detail": "agents.tracing.processor_interface", + "documentation": {} + }, + { + "label": "SpanImpl", + "importPath": "agents.tracing.spans", + "description": "agents.tracing.spans", + "isExtraImport": true, + "detail": "agents.tracing.spans", + "documentation": {} + }, + { + "label": "SpanError", + "importPath": "agents.tracing.spans", + "description": "agents.tracing.spans", + "isExtraImport": true, + "detail": "agents.tracing.spans", + "documentation": {} + }, + { + "label": "TraceImpl", + "importPath": "agents.tracing.traces", + "description": "agents.tracing.traces", + "isExtraImport": true, + "detail": "agents.tracing.traces", + "documentation": {} + }, + { + "label": "draw_graph", + "importPath": "agents.extensions.visualization", + "description": "agents.extensions.visualization", + "isExtraImport": true, + "detail": "agents.extensions.visualization", + "documentation": {} + }, + { + "label": "get_all_edges", + "importPath": "agents.extensions.visualization", + "description": "agents.extensions.visualization", + "isExtraImport": true, + "detail": "agents.extensions.visualization", + "documentation": {} + }, + { + "label": "get_all_nodes", + "importPath": "agents.extensions.visualization", + "description": "agents.extensions.visualization", + "isExtraImport": true, + "detail": "agents.extensions.visualization", + "documentation": {} + }, + { + "label": "get_main_graph", + "importPath": "agents.extensions.visualization", + "description": "agents.extensions.visualization", + "isExtraImport": true, + "detail": "agents.extensions.visualization", + "documentation": {} + }, + { + "label": "bin_dir", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "bin_dir = 
os.path.dirname(abs_file)\nbase = bin_dir[: -len(\"bin\") - 1] # strip away the bin part from the __file__, plus the path separator\n# prepend bin to PATH (this file is inside the bin directory)\nos.environ[\"PATH\"] = os.pathsep.join([bin_dir, *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\nos.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "base", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "base = bin_dir[: -len(\"bin\") - 1] # strip away the bin part from the __file__, plus the path separator\n# prepend bin to PATH (this file is inside the bin directory)\nos.environ[\"PATH\"] = os.pathsep.join([bin_dir, *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\nos.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "os.environ[\"PATH\"]", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "os.environ[\"PATH\"] = os.pathsep.join([bin_dir, *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\nos.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "os.environ[\"VIRTUAL_ENV\"]", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "os.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "os.environ[\"VIRTUAL_ENV_PROMPT\"]", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "os.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" 
or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "prev_length", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "prev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "sys.path[:]", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "sys.real_prefix", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "sys.real_prefix = sys.prefix\nsys.prefix = base", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "sys.prefix", + "kind": 5, + "importPath": ".venv.bin.activate_this", + "description": ".venv.bin.activate_this", + "peekOfCode": "sys.prefix = base", + "detail": ".venv.bin.activate_this", + "documentation": {} + }, + { + "label": "built_instructions", + "kind": 2, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "def built_instructions(target_language: str, lang_code: str) -> str:\n do_not_translate_terms = \"\\n\".join(do_not_translate)\n specific_terms = \"\\n\".join(\n [f\"* {k} -> {v}\" for k, v in eng_to_non_eng_mapping.get(lang_code, {}).items()]\n )\n specific_instructions = \"\\n\".join(\n eng_to_non_eng_instructions.get(\"common\", [])\n + eng_to_non_eng_instructions.get(lang_code, [])\n )\n return f\"\"\"You are an expert technical translator.", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "translate_file", + "kind": 2, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "def translate_file(file_path: str, target_path: str, lang_code: str) -> None:\n print(f\"Translating {file_path} into a different language: {lang_code}\")\n with open(file_path, encoding=\"utf-8\") as f:\n content = f.read()\n # Split content into lines\n lines: list[str] = content.splitlines()\n chunks: list[str] = []\n current_chunk: list[str] = []\n # Split content into chunks of up to 120 lines, ensuring splits occur before section titles\n in_code_block = False", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "translate_single_source_file", + "kind": 2, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "def translate_single_source_file(file_path: str) -> None:\n relative_path = os.path.relpath(file_path, source_dir)\n if \"ref/\" in relative_path or not file_path.endswith(\".md\"):\n return\n for 
lang_code in languages:\n target_dir = os.path.join(source_dir, lang_code)\n target_path = os.path.join(target_dir, relative_path)\n # Ensure the target directory exists\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n # Translate and save the file", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "def main():\n parser = argparse.ArgumentParser(description=\"Translate documentation files\")\n parser.add_argument(\n \"--file\", type=str, help=\"Specific file to translate (relative to docs directory)\"\n )\n args = parser.parse_args()\n if args.file:\n # Translate a single file\n # Handle both \"foo.md\" and \"docs/foo.md\" formats\n if args.file.startswith(\"docs/\"):", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "OPENAI_MODEL", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "OPENAI_MODEL = os.environ.get(\"OPENAI_MODEL\", \"o3\")\nENABLE_CODE_SNIPPET_EXCLUSION = True\n# gpt-4.5 needed this for better quality\nENABLE_SMALL_CHUNK_TRANSLATION = False\nSEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "ENABLE_CODE_SNIPPET_EXCLUSION", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "ENABLE_CODE_SNIPPET_EXCLUSION = True\n# gpt-4.5 needed this for better quality\nENABLE_SMALL_CHUNK_TRANSLATION = False\nSEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories\nsource_dir = \"docs\"", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "ENABLE_SMALL_CHUNK_TRANSLATION", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "ENABLE_SMALL_CHUNK_TRANSLATION = False\nSEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories\nsource_dir = \"docs\"\nlanguages = {\n \"ja\": \"Japanese\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "SEARCH_EXCLUSION", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "SEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories\nsource_dir = \"docs\"\nlanguages = {\n \"ja\": \"Japanese\",\n # Add more languages here, e.g., \"fr\": \"French\"", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "source_dir", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "source_dir = \"docs\"\nlanguages = {\n \"ja\": \"Japanese\",\n # Add more languages here, e.g., \"fr\": \"French\"\n}\n# Initialize OpenAI client\nopenai_client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n# Define dictionaries for translation control\ndo_not_translate = [\n \"OpenAI\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "languages", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "languages = {\n 
\"ja\": \"Japanese\",\n # Add more languages here, e.g., \"fr\": \"French\"\n}\n# Initialize OpenAI client\nopenai_client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n# Define dictionaries for translation control\ndo_not_translate = [\n \"OpenAI\",\n \"Agents SDK\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "openai_client", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "openai_client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n# Define dictionaries for translation control\ndo_not_translate = [\n \"OpenAI\",\n \"Agents SDK\",\n \"Hello World\",\n \"Model context protocol\",\n \"MCP\",\n \"structured outputs\",\n \"Chain-of-Thought\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "do_not_translate", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "do_not_translate = [\n \"OpenAI\",\n \"Agents SDK\",\n \"Hello World\",\n \"Model context protocol\",\n \"MCP\",\n \"structured outputs\",\n \"Chain-of-Thought\",\n \"Chat Completions\",\n \"Computer-Using Agent\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "eng_to_non_eng_mapping", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "eng_to_non_eng_mapping = {\n \"ja\": {\n \"agents\": \"エージェント\",\n \"computer use\": \"コンピュータ操作\",\n \"OAI hosted tools\": \"OpenAI がホストするツール\",\n \"well formed data\": \"適切な形式のデータ\",\n \"guardrail\": \"ガードレール\",\n \"handoffs\": \"ハンドオフ\",\n \"function tools\": \"関数ツール\",\n \"tracing\": \"トレーシング\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "eng_to_non_eng_instructions", + "kind": 5, + "importPath": "docs.scripts.translate_docs", + "description": "docs.scripts.translate_docs", + "peekOfCode": "eng_to_non_eng_instructions = {\n \"common\": [\n \"* The term 'examples' must be code examples when the page mentions the code examples in the repo, it can be translated as either 'code examples' or 'sample code'.\",\n \"* The term 'primitives' can be translated as basic components.\",\n \"* When the terms 'instructions' and 'tools' are mentioned as API parameter names, they must be kept as is.\",\n \"* The terms 'temperature', 'top_p', 'max_tokens', 'presence_penalty', 'frequency_penalty' as parameter names must be kept as is.\",\n ],\n \"ja\": [\n \"* The term 'result' in the Runner guide context must be translated like 'execution results'\",\n \"* The term 'raw' in 'raw response events' must be kept as is\",", + "detail": "docs.scripts.translate_docs", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.agent_patterns.agents_as_tools", + "description": "examples.agent_patterns.agents_as_tools", + "peekOfCode": "spanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You translate the user's message to Spanish\",\n handoff_description=\"An english to spanish translator\",\n)\nfrench_agent = Agent(\n name=\"french_agent\",\n instructions=\"You translate the user's message to French\",\n handoff_description=\"An english to french translator\",\n)", + "detail": "examples.agent_patterns.agents_as_tools", + "documentation": {} + }, + { + "label": "french_agent", + "kind": 5, + "importPath": "examples.agent_patterns.agents_as_tools", + "description": 
"examples.agent_patterns.agents_as_tools", + "peekOfCode": "french_agent = Agent(\n name=\"french_agent\",\n instructions=\"You translate the user's message to French\",\n handoff_description=\"An english to french translator\",\n)\nitalian_agent = Agent(\n name=\"italian_agent\",\n instructions=\"You translate the user's message to Italian\",\n handoff_description=\"An english to italian translator\",\n)", + "detail": "examples.agent_patterns.agents_as_tools", + "documentation": {} + }, + { + "label": "italian_agent", + "kind": 5, + "importPath": "examples.agent_patterns.agents_as_tools", + "description": "examples.agent_patterns.agents_as_tools", + "peekOfCode": "italian_agent = Agent(\n name=\"italian_agent\",\n instructions=\"You translate the user's message to Italian\",\n handoff_description=\"An english to italian translator\",\n)\norchestrator_agent = Agent(\n name=\"orchestrator_agent\",\n instructions=(\n \"You are a translation agent. You use the tools given to you to translate.\"\n \"If asked for multiple translations, you call the relevant tools in order.\"", + "detail": "examples.agent_patterns.agents_as_tools", + "documentation": {} + }, + { + "label": "orchestrator_agent", + "kind": 5, + "importPath": "examples.agent_patterns.agents_as_tools", + "description": "examples.agent_patterns.agents_as_tools", + "peekOfCode": "orchestrator_agent = Agent(\n name=\"orchestrator_agent\",\n instructions=(\n \"You are a translation agent. You use the tools given to you to translate.\"\n \"If asked for multiple translations, you call the relevant tools in order.\"\n \"You never translate on your own, you always use the provided tools.\"\n ),\n tools=[\n spanish_agent.as_tool(\n tool_name=\"translate_to_spanish\",", + "detail": "examples.agent_patterns.agents_as_tools", + "documentation": {} + }, + { + "label": "synthesizer_agent", + "kind": 5, + "importPath": "examples.agent_patterns.agents_as_tools", + "description": "examples.agent_patterns.agents_as_tools", + "peekOfCode": "synthesizer_agent = Agent(\n name=\"synthesizer_agent\",\n instructions=\"You inspect translations, correct them if needed, and produce a final concatenated response.\",\n)\nasync def main():\n msg = input(\"Hi! What would you like translated, and to which languages? \")\n # Run the entire orchestration in a single trace\n with trace(\"Orchestrator evaluator\"):\n orchestrator_result = await Runner.run(orchestrator_agent, msg)\n for item in orchestrator_result.new_items:", + "detail": "examples.agent_patterns.agents_as_tools", + "documentation": {} + }, + { + "label": "OutlineCheckerOutput", + "kind": 6, + "importPath": "examples.agent_patterns.deterministic", + "description": "examples.agent_patterns.deterministic", + "peekOfCode": "class OutlineCheckerOutput(BaseModel):\n good_quality: bool\n is_scifi: bool\noutline_checker_agent = Agent(\n name=\"outline_checker_agent\",\n instructions=\"Read the given story outline, and judge the quality. 
Also, determine if it is a scifi story.\",\n output_type=OutlineCheckerOutput,\n)\nstory_agent = Agent(\n name=\"story_agent\",", + "detail": "examples.agent_patterns.deterministic", + "documentation": {} + }, + { + "label": "story_outline_agent", + "kind": 5, + "importPath": "examples.agent_patterns.deterministic", + "description": "examples.agent_patterns.deterministic", + "peekOfCode": "story_outline_agent = Agent(\n name=\"story_outline_agent\",\n instructions=\"Generate a very short story outline based on the user's input.\",\n)\nclass OutlineCheckerOutput(BaseModel):\n good_quality: bool\n is_scifi: bool\noutline_checker_agent = Agent(\n name=\"outline_checker_agent\",\n instructions=\"Read the given story outline, and judge the quality. Also, determine if it is a scifi story.\",", + "detail": "examples.agent_patterns.deterministic", + "documentation": {} + }, + { + "label": "outline_checker_agent", + "kind": 5, + "importPath": "examples.agent_patterns.deterministic", + "description": "examples.agent_patterns.deterministic", + "peekOfCode": "outline_checker_agent = Agent(\n name=\"outline_checker_agent\",\n instructions=\"Read the given story outline, and judge the quality. Also, determine if it is a scifi story.\",\n output_type=OutlineCheckerOutput,\n)\nstory_agent = Agent(\n name=\"story_agent\",\n instructions=\"Write a short story based on the given outline.\",\n output_type=str,\n)", + "detail": "examples.agent_patterns.deterministic", + "documentation": {} + }, + { + "label": "story_agent", + "kind": 5, + "importPath": "examples.agent_patterns.deterministic", + "description": "examples.agent_patterns.deterministic", + "peekOfCode": "story_agent = Agent(\n name=\"story_agent\",\n instructions=\"Write a short story based on the given outline.\",\n output_type=str,\n)\nasync def main():\n input_prompt = input(\"What kind of story do you want? \")\n # Ensure the entire workflow is a single trace\n with trace(\"Deterministic story flow\"):\n # 1. 
Generate an outline", + "detail": "examples.agent_patterns.deterministic", + "documentation": {} + }, + { + "label": "Weather", + "kind": 6, + "importPath": "examples.agent_patterns.forcing_tool_use", + "description": "examples.agent_patterns.forcing_tool_use", + "peekOfCode": "class Weather(BaseModel):\n city: str\n temperature_range: str\n conditions: str\n@function_tool\ndef get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind\")\nasync def custom_tool_use_behavior(\n context: RunContextWrapper[Any], results: list[FunctionToolResult]", + "detail": "examples.agent_patterns.forcing_tool_use", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.agent_patterns.forcing_tool_use", + "description": "examples.agent_patterns.forcing_tool_use", + "peekOfCode": "def get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind\")\nasync def custom_tool_use_behavior(\n context: RunContextWrapper[Any], results: list[FunctionToolResult]\n) -> ToolsToFinalOutputResult:\n weather: Weather = results[0].output\n return ToolsToFinalOutputResult(\n is_final_output=True, final_output=f\"{weather.city} is {weather.conditions}.\"\n )", + "detail": "examples.agent_patterns.forcing_tool_use", + "documentation": {} + }, + { + "label": "MathHomeworkOutput", + "kind": 6, + "importPath": "examples.agent_patterns.input_guardrails", + "description": "examples.agent_patterns.input_guardrails", + "peekOfCode": "class MathHomeworkOutput(BaseModel):\n reasoning: str\n is_math_homework: bool\nguardrail_agent = Agent(\n name=\"Guardrail check\",\n instructions=\"Check if the user is asking you to do their math homework.\",\n output_type=MathHomeworkOutput,\n)\n@input_guardrail\nasync def math_guardrail(", + "detail": "examples.agent_patterns.input_guardrails", + "documentation": {} + }, + { + "label": "guardrail_agent", + "kind": 5, + "importPath": "examples.agent_patterns.input_guardrails", + "description": "examples.agent_patterns.input_guardrails", + "peekOfCode": "guardrail_agent = Agent(\n name=\"Guardrail check\",\n instructions=\"Check if the user is asking you to do their math homework.\",\n output_type=MathHomeworkOutput,\n)\n@input_guardrail\nasync def math_guardrail(\n context: RunContextWrapper[None], agent: Agent, input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n \"\"\"This is an input guardrail function, which happens to call an agent to check if the input", + "detail": "examples.agent_patterns.input_guardrails", + "documentation": {} + }, + { + "label": "EvaluationFeedback", + "kind": 6, + "importPath": "examples.agent_patterns.llm_as_a_judge", + "description": "examples.agent_patterns.llm_as_a_judge", + "peekOfCode": "class EvaluationFeedback:\n feedback: str\n score: Literal[\"pass\", \"needs_improvement\", \"fail\"]\nevaluator = Agent[None](\n name=\"evaluator\",\n instructions=(\n \"You evaluate a story outline and decide if it's good enough.\"\n \"If it's not good enough, you provide feedback on what needs to be improved.\"\n \"Never give it a pass on the first try. 
After 5 attempts, you can give it a pass if story outline is good enough - do not go for perfection\"\n ),", + "detail": "examples.agent_patterns.llm_as_a_judge", + "documentation": {} + }, + { + "label": "story_outline_generator", + "kind": 5, + "importPath": "examples.agent_patterns.llm_as_a_judge", + "description": "examples.agent_patterns.llm_as_a_judge", + "peekOfCode": "story_outline_generator = Agent(\n name=\"story_outline_generator\",\n instructions=(\n \"You generate a very short story outline based on the user's input.\"\n \"If there is any feedback provided, use it to improve the outline.\"\n ),\n)\n@dataclass\nclass EvaluationFeedback:\n feedback: str", + "detail": "examples.agent_patterns.llm_as_a_judge", + "documentation": {} + }, + { + "label": "evaluator", + "kind": 5, + "importPath": "examples.agent_patterns.llm_as_a_judge", + "description": "examples.agent_patterns.llm_as_a_judge", + "peekOfCode": "evaluator = Agent[None](\n name=\"evaluator\",\n instructions=(\n \"You evaluate a story outline and decide if it's good enough.\"\n \"If it's not good enough, you provide feedback on what needs to be improved.\"\n \"Never give it a pass on the first try. After 5 attempts, you can give it a pass if story outline is good enough - do not go for perfection\"\n ),\n output_type=EvaluationFeedback,\n)\nasync def main() -> None:", + "detail": "examples.agent_patterns.llm_as_a_judge", + "documentation": {} + }, + { + "label": "MessageOutput", + "kind": 6, + "importPath": "examples.agent_patterns.output_guardrails", + "description": "examples.agent_patterns.output_guardrails", + "peekOfCode": "class MessageOutput(BaseModel):\n reasoning: str = Field(description=\"Thoughts on how to respond to the user's message\")\n response: str = Field(description=\"The response to the user's message\")\n user_name: str | None = Field(description=\"The name of the user who sent the message, if known\")\n@output_guardrail\nasync def sensitive_data_check(\n context: RunContextWrapper, agent: Agent, output: MessageOutput\n) -> GuardrailFunctionOutput:\n phone_number_in_response = \"650\" in output.response\n phone_number_in_reasoning = \"650\" in output.reasoning", + "detail": "examples.agent_patterns.output_guardrails", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.agent_patterns.output_guardrails", + "description": "examples.agent_patterns.output_guardrails", + "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",\n output_type=MessageOutput,\n output_guardrails=[sensitive_data_check],\n)\nasync def main():\n # This should be ok\n await Runner.run(agent, \"What's the capital of California?\")\n print(\"First message passed\")", + "detail": "examples.agent_patterns.output_guardrails", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.agent_patterns.parallelization", + "description": "examples.agent_patterns.parallelization", + "peekOfCode": "spanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You translate the user's message to Spanish\",\n)\ntranslation_picker = Agent(\n name=\"translation_picker\",\n instructions=\"You pick the best Spanish translation from the given options.\",\n)\nasync def main():\n msg = input(\"Hi! 
Enter a message, and we'll translate it to Spanish.\\n\\n\")", + "detail": "examples.agent_patterns.parallelization", + "documentation": {} + }, + { + "label": "translation_picker", + "kind": 5, + "importPath": "examples.agent_patterns.parallelization", + "description": "examples.agent_patterns.parallelization", + "peekOfCode": "translation_picker = Agent(\n name=\"translation_picker\",\n instructions=\"You pick the best Spanish translation from the given options.\",\n)\nasync def main():\n msg = input(\"Hi! Enter a message, and we'll translate it to Spanish.\\n\\n\")\n # Ensure the entire workflow is a single trace\n with trace(\"Parallel translation\"):\n res_1, res_2, res_3 = await asyncio.gather(\n Runner.run(", + "detail": "examples.agent_patterns.parallelization", + "documentation": {} + }, + { + "label": "french_agent", + "kind": 5, + "importPath": "examples.agent_patterns.routing", + "description": "examples.agent_patterns.routing", + "peekOfCode": "french_agent = Agent(\n name=\"french_agent\",\n instructions=\"You only speak French\",\n)\nspanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You only speak Spanish\",\n)\nenglish_agent = Agent(\n name=\"english_agent\",", + "detail": "examples.agent_patterns.routing", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.agent_patterns.routing", + "description": "examples.agent_patterns.routing", + "peekOfCode": "spanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You only speak Spanish\",\n)\nenglish_agent = Agent(\n name=\"english_agent\",\n instructions=\"You only speak English\",\n)\ntriage_agent = Agent(\n name=\"triage_agent\",", + "detail": "examples.agent_patterns.routing", + "documentation": {} + }, + { + "label": "english_agent", + "kind": 5, + "importPath": "examples.agent_patterns.routing", + "description": "examples.agent_patterns.routing", + "peekOfCode": "english_agent = Agent(\n name=\"english_agent\",\n instructions=\"You only speak English\",\n)\ntriage_agent = Agent(\n name=\"triage_agent\",\n instructions=\"Handoff to the appropriate agent based on the language of the request.\",\n handoffs=[french_agent, spanish_agent, english_agent],\n)\nasync def main():", + "detail": "examples.agent_patterns.routing", + "documentation": {} + }, + { + "label": "triage_agent", + "kind": 5, + "importPath": "examples.agent_patterns.routing", + "description": "examples.agent_patterns.routing", + "peekOfCode": "triage_agent = Agent(\n name=\"triage_agent\",\n instructions=\"Handoff to the appropriate agent based on the language of the request.\",\n handoffs=[french_agent, spanish_agent, english_agent],\n)\nasync def main():\n # We'll create an ID for this conversation, so we can link each trace\n conversation_id = str(uuid.uuid4().hex[:16])\n msg = input(\"Hi! We speak French, Spanish and English. How can I help? 
\")\n agent = triage_agent", + "detail": "examples.agent_patterns.routing", + "documentation": {} + }, + { + "label": "GuardrailOutput", + "kind": 6, + "importPath": "examples.agent_patterns.streaming_guardrails", + "description": "examples.agent_patterns.streaming_guardrails", + "peekOfCode": "class GuardrailOutput(BaseModel):\n reasoning: str = Field(\n description=\"Reasoning about whether the response could be understood by a ten year old.\"\n )\n is_readable_by_ten_year_old: bool = Field(\n description=\"Whether the response is understandable by a ten year old.\"\n )\nguardrail_agent = Agent(\n name=\"Checker\",\n instructions=(", + "detail": "examples.agent_patterns.streaming_guardrails", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.agent_patterns.streaming_guardrails", + "description": "examples.agent_patterns.streaming_guardrails", + "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"You are a helpful assistant. You ALWAYS write long responses, making sure to be verbose \"\n \"and detailed.\"\n ),\n)\nclass GuardrailOutput(BaseModel):\n reasoning: str = Field(\n description=\"Reasoning about whether the response could be understood by a ten year old.\"", + "detail": "examples.agent_patterns.streaming_guardrails", + "documentation": {} + }, + { + "label": "guardrail_agent", + "kind": 5, + "importPath": "examples.agent_patterns.streaming_guardrails", + "description": "examples.agent_patterns.streaming_guardrails", + "peekOfCode": "guardrail_agent = Agent(\n name=\"Checker\",\n instructions=(\n \"You will be given a question and a response. Your goal is to judge whether the response \"\n \"is simple enough to be understood by a ten year old.\"\n ),\n output_type=GuardrailOutput,\n model=\"gpt-4o-mini\",\n)\nasync def check_guardrail(text: str) -> GuardrailOutput:", + "detail": "examples.agent_patterns.streaming_guardrails", + "documentation": {} + }, + { + "label": "CustomAgentHooks", + "kind": 6, + "importPath": "examples.basic.agent_lifecycle_example", + "description": "examples.basic.agent_lifecycle_example", + "peekOfCode": "class CustomAgentHooks(AgentHooks):\n def __init__(self, display_name: str):\n self.event_counter = 0\n self.display_name = display_name\n async def on_start(self, context: RunContextWrapper, agent: Agent) -> None:\n self.event_counter += 1\n print(f\"### ({self.display_name}) {self.event_counter}: Agent {agent.name} started\")\n async def on_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:\n self.event_counter += 1\n print(", + "detail": "examples.basic.agent_lifecycle_example", + "documentation": {} + }, + { + "label": "FinalResult", + "kind": 6, + "importPath": "examples.basic.agent_lifecycle_example", + "description": "examples.basic.agent_lifecycle_example", + "peekOfCode": "class FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n hooks=CustomAgentHooks(display_name=\"Multiply Agent\"),\n)\nstart_agent = Agent(", + "detail": "examples.basic.agent_lifecycle_example", + "documentation": {} + }, + { + "label": "random_number", + "kind": 2, + "importPath": "examples.basic.agent_lifecycle_example", + "description": "examples.basic.agent_lifecycle_example", + "peekOfCode": "def random_number(max: int) -> int:\n \"\"\"\n Generate a random number up to the provided maximum.\n \"\"\"\n return 
random.randint(0, max)\n@function_tool\ndef multiply_by_two(x: int) -> int:\n \"\"\"Simple multiplication by two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):", + "detail": "examples.basic.agent_lifecycle_example", + "documentation": {} + }, + { + "label": "multiply_by_two", + "kind": 2, + "importPath": "examples.basic.agent_lifecycle_example", + "description": "examples.basic.agent_lifecycle_example", + "peekOfCode": "def multiply_by_two(x: int) -> int:\n \"\"\"Simple multiplication by two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,", + "detail": "examples.basic.agent_lifecycle_example", + "documentation": {} + }, + { + "label": "multiply_agent", + "kind": 5, + "importPath": "examples.basic.agent_lifecycle_example", + "description": "examples.basic.agent_lifecycle_example", + "peekOfCode": "multiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n hooks=CustomAgentHooks(display_name=\"Multiply Agent\"),\n)\nstart_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. If it's odd, hand off to the multiply agent.\",", + "detail": "examples.basic.agent_lifecycle_example", + "documentation": {} + }, + { + "label": "start_agent", + "kind": 5, + "importPath": "examples.basic.agent_lifecycle_example", + "description": "examples.basic.agent_lifecycle_example", + "peekOfCode": "start_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. 
If it's odd, hand off to the multiply agent.\",\n tools=[random_number],\n output_type=FinalResult,\n handoffs=[multiply_agent],\n hooks=CustomAgentHooks(display_name=\"Start Agent\"),\n)\nasync def main() -> None:\n user_input = input(\"Enter a max number: \")", + "detail": "examples.basic.agent_lifecycle_example", + "documentation": {} + }, + { + "label": "CustomContext", + "kind": 6, + "importPath": "examples.basic.dynamic_system_prompt", + "description": "examples.basic.dynamic_system_prompt", + "peekOfCode": "class CustomContext:\n def __init__(self, style: Literal[\"haiku\", \"pirate\", \"robot\"]):\n self.style = style\ndef custom_instructions(\n run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext]\n) -> str:\n context = run_context.context\n if context.style == \"haiku\":\n return \"Only respond in haikus.\"\n elif context.style == \"pirate\":", + "detail": "examples.basic.dynamic_system_prompt", + "documentation": {} + }, + { + "label": "custom_instructions", + "kind": 2, + "importPath": "examples.basic.dynamic_system_prompt", + "description": "examples.basic.dynamic_system_prompt", + "peekOfCode": "def custom_instructions(\n run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext]\n) -> str:\n context = run_context.context\n if context.style == \"haiku\":\n return \"Only respond in haikus.\"\n elif context.style == \"pirate\":\n return \"Respond as a pirate.\"\n else:\n return \"Respond as a robot and say 'beep boop' a lot.\"", + "detail": "examples.basic.dynamic_system_prompt", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.basic.dynamic_system_prompt", + "description": "examples.basic.dynamic_system_prompt", + "peekOfCode": "agent = Agent(\n name=\"Chat agent\",\n instructions=custom_instructions,\n)\nasync def main():\n choice: Literal[\"haiku\", \"pirate\", \"robot\"] = random.choice([\"haiku\", \"pirate\", \"robot\"])\n context = CustomContext(style=choice)\n print(f\"Using style: {choice}\\n\")\n user_message = \"Tell me a joke.\"\n print(f\"User: {user_message}\")", + "detail": "examples.basic.dynamic_system_prompt", + "documentation": {} + }, + { + "label": "ExampleHooks", + "kind": 6, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "class ExampleHooks(RunHooks):\n def __init__(self):\n self.event_counter = 0\n def _usage_to_str(self, usage: Usage) -> str:\n return f\"{usage.requests} requests, {usage.input_tokens} input tokens, {usage.output_tokens} output tokens, {usage.total_tokens} total tokens\"\n async def on_agent_start(self, context: RunContextWrapper, agent: Agent) -> None:\n self.event_counter += 1\n print(\n f\"### {self.event_counter}: Agent {agent.name} started. 
Usage: {self._usage_to_str(context.usage)}\"\n )", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "FinalResult", + "kind": 6, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "class FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n)\nstart_agent = Agent(\n name=\"Start Agent\",", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "random_number", + "kind": 2, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "def random_number(max: int) -> int:\n \"\"\"Generate a random number up to the provided max.\"\"\"\n return random.randint(0, max)\n@function_tool\ndef multiply_by_two(x: int) -> int:\n \"\"\"Return x times two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "multiply_by_two", + "kind": 2, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "def multiply_by_two(x: int) -> int:\n \"\"\"Return x times two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "hooks", + "kind": 5, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "hooks = ExampleHooks()\n###\n@function_tool\ndef random_number(max: int) -> int:\n \"\"\"Generate a random number up to the provided max.\"\"\"\n return random.randint(0, max)\n@function_tool\ndef multiply_by_two(x: int) -> int:\n \"\"\"Return x times two.\"\"\"\n return x * 2", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "multiply_agent", + "kind": 5, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "multiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n)\nstart_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. If it's odd, hand off to the multiplier agent.\",\n tools=[random_number],", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "start_agent", + "kind": 5, + "importPath": "examples.basic.lifecycle_example", + "description": "examples.basic.lifecycle_example", + "peekOfCode": "start_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. 
If it's odd, hand off to the multiplier agent.\",\n tools=[random_number],\n output_type=FinalResult,\n handoffs=[multiply_agent],\n)\nasync def main() -> None:\n user_input = input(\"Enter a max number: \")\n await Runner.run(", + "detail": "examples.basic.lifecycle_example", + "documentation": {} + }, + { + "label": "image_to_base64", + "kind": 2, + "importPath": "examples.basic.local_image", + "description": "examples.basic.local_image", + "peekOfCode": "def image_to_base64(image_path):\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n return encoded_string\nasync def main():\n # Print base64-encoded image\n b64_image = image_to_base64(FILEPATH)\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",", + "detail": "examples.basic.local_image", + "documentation": {} + }, + { + "label": "FILEPATH", + "kind": 5, + "importPath": "examples.basic.local_image", + "description": "examples.basic.local_image", + "peekOfCode": "FILEPATH = os.path.join(os.path.dirname(__file__), \"media/image_bison.jpg\")\ndef image_to_base64(image_path):\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n return encoded_string\nasync def main():\n # Print base64-encoded image\n b64_image = image_to_base64(FILEPATH)\n agent = Agent(\n name=\"Assistant\",", + "detail": "examples.basic.local_image", + "documentation": {} + }, + { + "label": "OutputType", + "kind": 6, + "importPath": "examples.basic.non_strict_output_type", + "description": "examples.basic.non_strict_output_type", + "peekOfCode": "class OutputType:\n jokes: dict[int, str]\n \"\"\"A list of jokes, indexed by joke number.\"\"\"\nclass CustomOutputSchema(AgentOutputSchemaBase):\n \"\"\"A demonstration of a custom output schema.\"\"\"\n def is_plain_text(self) -> bool:\n return False\n def name(self) -> str:\n return \"CustomOutputSchema\"\n def json_schema(self) -> dict[str, Any]:", + "detail": "examples.basic.non_strict_output_type", + "documentation": {} + }, + { + "label": "CustomOutputSchema", + "kind": 6, + "importPath": "examples.basic.non_strict_output_type", + "description": "examples.basic.non_strict_output_type", + "peekOfCode": "class CustomOutputSchema(AgentOutputSchemaBase):\n \"\"\"A demonstration of a custom output schema.\"\"\"\n def is_plain_text(self) -> bool:\n return False\n def name(self) -> str:\n return \"CustomOutputSchema\"\n def json_schema(self) -> dict[str, Any]:\n return {\n \"type\": \"object\",\n \"properties\": {\"jokes\": {\"type\": \"object\", \"properties\": {\"joke\": {\"type\": \"string\"}}}},", + "detail": "examples.basic.non_strict_output_type", + "documentation": {} + }, + { + "label": "DynamicContext", + "kind": 6, + "importPath": "examples.basic.prompt_template", + "description": "examples.basic.prompt_template", + "peekOfCode": "class DynamicContext:\n def __init__(self, prompt_id: str):\n self.prompt_id = prompt_id\n self.poem_style = random.choice([\"limerick\", \"haiku\", \"ballad\"])\n print(f\"[debug] DynamicContext initialized with poem_style: {self.poem_style}\")\nasync def _get_dynamic_prompt(data: GenerateDynamicPromptData):\n ctx: DynamicContext = data.context.context\n return {\n \"id\": ctx.prompt_id,\n \"version\": \"1\",", + "detail": "examples.basic.prompt_template", + "documentation": {} + }, + { + "label": "DEFAULT_PROMPT_ID", + "kind": 5, + "importPath": "examples.basic.prompt_template", + "description": 
"examples.basic.prompt_template", + "peekOfCode": "DEFAULT_PROMPT_ID = \"pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b\"\nclass DynamicContext:\n def __init__(self, prompt_id: str):\n self.prompt_id = prompt_id\n self.poem_style = random.choice([\"limerick\", \"haiku\", \"ballad\"])\n print(f\"[debug] DynamicContext initialized with poem_style: {self.poem_style}\")\nasync def _get_dynamic_prompt(data: GenerateDynamicPromptData):\n ctx: DynamicContext = data.context.context\n return {\n \"id\": ctx.prompt_id,", + "detail": "examples.basic.prompt_template", + "documentation": {} + }, + { + "label": "URL", + "kind": 5, + "importPath": "examples.basic.remote_image", + "description": "examples.basic.remote_image", + "peekOfCode": "URL = \"https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg\"\nasync def main():\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",\n )\n result = await Runner.run(\n agent,\n [\n {", + "detail": "examples.basic.remote_image", + "documentation": {} + }, + { + "label": "how_many_jokes", + "kind": 2, + "importPath": "examples.basic.stream_items", + "description": "examples.basic.stream_items", + "peekOfCode": "def how_many_jokes() -> int:\n return random.randint(1, 10)\nasync def main():\n agent = Agent(\n name=\"Joker\",\n instructions=\"First call the `how_many_jokes` tool, then tell that many jokes.\",\n tools=[how_many_jokes],\n )\n result = Runner.run_streamed(\n agent,", + "detail": "examples.basic.stream_items", + "documentation": {} + }, + { + "label": "Weather", + "kind": 6, + "importPath": "examples.basic.tools", + "description": "examples.basic.tools", + "peekOfCode": "class Weather(BaseModel):\n city: str\n temperature_range: str\n conditions: str\n@function_tool\ndef get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind.\")\nagent = Agent(\n name=\"Hello world\",", + "detail": "examples.basic.tools", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.basic.tools", + "description": "examples.basic.tools", + "peekOfCode": "def get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind.\")\nagent = Agent(\n name=\"Hello world\",\n instructions=\"You are a helpful agent.\",\n tools=[get_weather],\n)\nasync def main():\n result = await Runner.run(agent, input=\"What's the weather in Tokyo?\")", + "detail": "examples.basic.tools", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.basic.tools", + "description": "examples.basic.tools", + "peekOfCode": "agent = Agent(\n name=\"Hello world\",\n instructions=\"You are a helpful agent.\",\n tools=[get_weather],\n)\nasync def main():\n result = await Runner.run(agent, input=\"What's the weather in Tokyo?\")\n print(result.final_output)\n # The weather in Tokyo is sunny.\nif __name__ == \"__main__\":", + "detail": "examples.basic.tools", + "documentation": {} + }, + { + "label": "AirlineAgentContext", + "kind": 6, + "importPath": "examples.customer_service.main", + "description": "examples.customer_service.main", + "peekOfCode": "class AirlineAgentContext(BaseModel):\n passenger_name: str | None = None\n confirmation_number: str | None = None\n seat_number: str | None = None\n flight_number: str | None = None\n### TOOLS\n@function_tool(\n 
name_override=\"faq_lookup_tool\", description_override=\"Lookup frequently asked questions.\"\n)\nasync def faq_lookup_tool(question: str) -> str:", + "detail": "examples.customer_service.main", + "documentation": {} + }, + { + "label": "faq_agent", + "kind": 5, + "importPath": "examples.customer_service.main", + "description": "examples.customer_service.main", + "peekOfCode": "faq_agent = Agent[AirlineAgentContext](\n name=\"FAQ Agent\",\n handoff_description=\"A helpful agent that can answer questions about the airline.\",\n instructions=f\"\"\"{RECOMMENDED_PROMPT_PREFIX}\n You are an FAQ agent. If you are speaking to a customer, you probably were transferred to from the triage agent.\n Use the following routine to support the customer.\n # Routine\n 1. Identify the last question asked by the customer.\n 2. Use the faq lookup tool to answer the question. Do not rely on your own knowledge.\n 3. If you cannot answer the question, transfer back to the triage agent.\"\"\",", + "detail": "examples.customer_service.main", + "documentation": {} + }, + { + "label": "seat_booking_agent", + "kind": 5, + "importPath": "examples.customer_service.main", + "description": "examples.customer_service.main", + "peekOfCode": "seat_booking_agent = Agent[AirlineAgentContext](\n name=\"Seat Booking Agent\",\n handoff_description=\"A helpful agent that can update a seat on a flight.\",\n instructions=f\"\"\"{RECOMMENDED_PROMPT_PREFIX}\n You are a seat booking agent. If you are speaking to a customer, you probably were transferred to from the triage agent.\n Use the following routine to support the customer.\n # Routine\n 1. Ask for their confirmation number.\n 2. Ask the customer what their desired seat number is.\n 3. Use the update seat tool to update the seat on the flight.", + "detail": "examples.customer_service.main", + "documentation": {} + }, + { + "label": "triage_agent", + "kind": 5, + "importPath": "examples.customer_service.main", + "description": "examples.customer_service.main", + "peekOfCode": "triage_agent = Agent[AirlineAgentContext](\n name=\"Triage Agent\",\n handoff_description=\"A triage agent that can delegate a customer's request to the appropriate agent.\",\n instructions=(\n f\"{RECOMMENDED_PROMPT_PREFIX} \"\n \"You are a helpful triaging agent. You can use your tools to delegate questions to other appropriate agents.\"\n ),\n handoffs=[\n faq_agent,\n handoff(agent=seat_booking_agent, on_handoff=on_seat_booking_handoff),", + "detail": "examples.customer_service.main", + "documentation": {} + }, + { + "label": "AnalysisSummary", + "kind": 6, + "importPath": "examples.financial_research_agent.agents.financials_agent", + "description": "examples.financial_research_agent.agents.financials_agent", + "peekOfCode": "class AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nfinancials_agent = Agent(\n name=\"FundamentalsAnalystAgent\",\n instructions=FINANCIALS_PROMPT,\n output_type=AnalysisSummary,\n)", + "detail": "examples.financial_research_agent.agents.financials_agent", + "documentation": {} + }, + { + "label": "FINANCIALS_PROMPT", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.financials_agent", + "description": "examples.financial_research_agent.agents.financials_agent", + "peekOfCode": "FINANCIALS_PROMPT = (\n \"You are a financial analyst focused on company fundamentals such as revenue, \"\n \"profit, margins and growth trajectory. 
Given a collection of web (and optional file) \"\n \"search results about a company, write a concise analysis of its recent financial \"\n \"performance. Pull out key metrics or quotes. Keep it under 2 paragraphs.\"\n)\nclass AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nfinancials_agent = Agent(", + "detail": "examples.financial_research_agent.agents.financials_agent", + "documentation": {} + }, + { + "label": "financials_agent", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.financials_agent", + "description": "examples.financial_research_agent.agents.financials_agent", + "peekOfCode": "financials_agent = Agent(\n name=\"FundamentalsAnalystAgent\",\n instructions=FINANCIALS_PROMPT,\n output_type=AnalysisSummary,\n)", + "detail": "examples.financial_research_agent.agents.financials_agent", + "documentation": {} + }, + { + "label": "FinancialSearchItem", + "kind": 6, + "importPath": "examples.financial_research_agent.agents.planner_agent", + "description": "examples.financial_research_agent.agents.planner_agent", + "peekOfCode": "class FinancialSearchItem(BaseModel):\n reason: str\n \"\"\"Your reasoning for why this search is relevant.\"\"\"\n query: str\n \"\"\"The search term to feed into a web (or file) search.\"\"\"\nclass FinancialSearchPlan(BaseModel):\n searches: list[FinancialSearchItem]\n \"\"\"A list of searches to perform.\"\"\"\nplanner_agent = Agent(\n name=\"FinancialPlannerAgent\",", + "detail": "examples.financial_research_agent.agents.planner_agent", + "documentation": {} + }, + { + "label": "FinancialSearchPlan", + "kind": 6, + "importPath": "examples.financial_research_agent.agents.planner_agent", + "description": "examples.financial_research_agent.agents.planner_agent", + "peekOfCode": "class FinancialSearchPlan(BaseModel):\n searches: list[FinancialSearchItem]\n \"\"\"A list of searches to perform.\"\"\"\nplanner_agent = Agent(\n name=\"FinancialPlannerAgent\",\n instructions=PROMPT,\n model=\"o3-mini\",\n output_type=FinancialSearchPlan,\n)", + "detail": "examples.financial_research_agent.agents.planner_agent", + "documentation": {} + }, + { + "label": "PROMPT", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.planner_agent", + "description": "examples.financial_research_agent.agents.planner_agent", + "peekOfCode": "PROMPT = (\n \"You are a financial research planner. Given a request for financial analysis, \"\n \"produce a set of web searches to gather the context needed. Aim for recent \"\n \"headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. 
\"\n \"Output between 5 and 15 search terms to query for.\"\n)\nclass FinancialSearchItem(BaseModel):\n reason: str\n \"\"\"Your reasoning for why this search is relevant.\"\"\"\n query: str", + "detail": "examples.financial_research_agent.agents.planner_agent", + "documentation": {} + }, + { + "label": "planner_agent", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.planner_agent", + "description": "examples.financial_research_agent.agents.planner_agent", + "peekOfCode": "planner_agent = Agent(\n name=\"FinancialPlannerAgent\",\n instructions=PROMPT,\n model=\"o3-mini\",\n output_type=FinancialSearchPlan,\n)", + "detail": "examples.financial_research_agent.agents.planner_agent", + "documentation": {} + }, + { + "label": "AnalysisSummary", + "kind": 6, + "importPath": "examples.financial_research_agent.agents.risk_agent", + "description": "examples.financial_research_agent.agents.risk_agent", + "peekOfCode": "class AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nrisk_agent = Agent(\n name=\"RiskAnalystAgent\",\n instructions=RISK_PROMPT,\n output_type=AnalysisSummary,\n)", + "detail": "examples.financial_research_agent.agents.risk_agent", + "documentation": {} + }, + { + "label": "RISK_PROMPT", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.risk_agent", + "description": "examples.financial_research_agent.agents.risk_agent", + "peekOfCode": "RISK_PROMPT = (\n \"You are a risk analyst looking for potential red flags in a company's outlook. \"\n \"Given background research, produce a short analysis of risks such as competitive threats, \"\n \"regulatory issues, supply chain problems, or slowing growth. Keep it under 2 paragraphs.\"\n)\nclass AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nrisk_agent = Agent(\n name=\"RiskAnalystAgent\",", + "detail": "examples.financial_research_agent.agents.risk_agent", + "documentation": {} + }, + { + "label": "risk_agent", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.risk_agent", + "description": "examples.financial_research_agent.agents.risk_agent", + "peekOfCode": "risk_agent = Agent(\n name=\"RiskAnalystAgent\",\n instructions=RISK_PROMPT,\n output_type=AnalysisSummary,\n)", + "detail": "examples.financial_research_agent.agents.risk_agent", + "documentation": {} + }, + { + "label": "INSTRUCTIONS", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.search_agent", + "description": "examples.financial_research_agent.agents.search_agent", + "peekOfCode": "INSTRUCTIONS = (\n \"You are a research assistant specializing in financial topics. \"\n \"Given a search term, use web search to retrieve up‑to‑date context and \"\n \"produce a short summary of at most 300 words. 
Focus on key numbers, events, \"\n \"or quotes that will be useful to a financial analyst.\"\n)\nsearch_agent = Agent(\n name=\"FinancialSearchAgent\",\n instructions=INSTRUCTIONS,\n tools=[WebSearchTool()],", + "detail": "examples.financial_research_agent.agents.search_agent", + "documentation": {} + }, + { + "label": "search_agent", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.search_agent", + "description": "examples.financial_research_agent.agents.search_agent", + "peekOfCode": "search_agent = Agent(\n name=\"FinancialSearchAgent\",\n instructions=INSTRUCTIONS,\n tools=[WebSearchTool()],\n model_settings=ModelSettings(tool_choice=\"required\"),\n)", + "detail": "examples.financial_research_agent.agents.search_agent", + "documentation": {} + }, + { + "label": "VerificationResult", + "kind": 6, + "importPath": "examples.financial_research_agent.agents.verifier_agent", + "description": "examples.financial_research_agent.agents.verifier_agent", + "peekOfCode": "class VerificationResult(BaseModel):\n verified: bool\n \"\"\"Whether the report seems coherent and plausible.\"\"\"\n issues: str\n \"\"\"If not verified, describe the main issues or concerns.\"\"\"\nverifier_agent = Agent(\n name=\"VerificationAgent\",\n instructions=VERIFIER_PROMPT,\n model=\"gpt-4o\",\n output_type=VerificationResult,", + "detail": "examples.financial_research_agent.agents.verifier_agent", + "documentation": {} + }, + { + "label": "VERIFIER_PROMPT", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.verifier_agent", + "description": "examples.financial_research_agent.agents.verifier_agent", + "peekOfCode": "VERIFIER_PROMPT = (\n \"You are a meticulous auditor. You have been handed a financial analysis report. \"\n \"Your job is to verify the report is internally consistent, clearly sourced, and makes \"\n \"no unsupported claims. 
Point out any issues or uncertainties.\"\n)\nclass VerificationResult(BaseModel):\n verified: bool\n \"\"\"Whether the report seems coherent and plausible.\"\"\"\n issues: str\n \"\"\"If not verified, describe the main issues or concerns.\"\"\"", + "detail": "examples.financial_research_agent.agents.verifier_agent", + "documentation": {} + }, + { + "label": "verifier_agent", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.verifier_agent", + "description": "examples.financial_research_agent.agents.verifier_agent", + "peekOfCode": "verifier_agent = Agent(\n name=\"VerificationAgent\",\n instructions=VERIFIER_PROMPT,\n model=\"gpt-4o\",\n output_type=VerificationResult,\n)", + "detail": "examples.financial_research_agent.agents.verifier_agent", + "documentation": {} + }, + { + "label": "FinancialReportData", + "kind": 6, + "importPath": "examples.financial_research_agent.agents.writer_agent", + "description": "examples.financial_research_agent.agents.writer_agent", + "peekOfCode": "class FinancialReportData(BaseModel):\n short_summary: str\n \"\"\"A short 2‑3 sentence executive summary.\"\"\"\n markdown_report: str\n \"\"\"The full markdown report.\"\"\"\n follow_up_questions: list[str]\n \"\"\"Suggested follow‑up questions for further research.\"\"\"\n# Note: We will attach handoffs to specialist analyst agents at runtime in the manager.\n# This shows how an agent can use handoffs to delegate to specialized subagents.\nwriter_agent = Agent(", + "detail": "examples.financial_research_agent.agents.writer_agent", + "documentation": {} + }, + { + "label": "WRITER_PROMPT", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.writer_agent", + "description": "examples.financial_research_agent.agents.writer_agent", + "peekOfCode": "WRITER_PROMPT = (\n \"You are a senior financial analyst. You will be provided with the original query and \"\n \"a set of raw search summaries. Your task is to synthesize these into a long‑form markdown \"\n \"report (at least several paragraphs) including a short executive summary and follow‑up \"\n \"questions. If needed, you can call the available analysis tools (e.g. 
fundamentals_analysis, \"\n \"risk_analysis) to get short specialist write‑ups to incorporate.\"\n)\nclass FinancialReportData(BaseModel):\n short_summary: str\n \"\"\"A short 2‑3 sentence executive summary.\"\"\"", + "detail": "examples.financial_research_agent.agents.writer_agent", + "documentation": {} + }, + { + "label": "writer_agent", + "kind": 5, + "importPath": "examples.financial_research_agent.agents.writer_agent", + "description": "examples.financial_research_agent.agents.writer_agent", + "peekOfCode": "writer_agent = Agent(\n name=\"FinancialWriterAgent\",\n instructions=WRITER_PROMPT,\n model=\"gpt-4.5-preview-2025-02-27\",\n output_type=FinancialReportData,\n)", + "detail": "examples.financial_research_agent.agents.writer_agent", + "documentation": {} + }, + { + "label": "FinancialResearchManager", + "kind": 6, + "importPath": "examples.financial_research_agent.manager", + "description": "examples.financial_research_agent.manager", + "peekOfCode": "class FinancialResearchManager:\n \"\"\"\n Orchestrates the full flow: planning, searching, sub‑analysis, writing, and verification.\n \"\"\"\n def __init__(self) -> None:\n self.console = Console()\n self.printer = Printer(self.console)\n async def run(self, query: str) -> None:\n trace_id = gen_trace_id()\n with trace(\"Financial research trace\", trace_id=trace_id):", + "detail": "examples.financial_research_agent.manager", + "documentation": {} + }, + { + "label": "Printer", + "kind": 6, + "importPath": "examples.financial_research_agent.printer", + "description": "examples.financial_research_agent.printer", + "peekOfCode": "class Printer:\n \"\"\"\n Simple wrapper to stream status updates. Used by the financial bot\n manager as it orchestrates planning, search and writing.\n \"\"\"\n def __init__(self, console: Console) -> None:\n self.live = Live(console=console)\n self.items: dict[str, tuple[str, bool]] = {}\n self.hide_done_ids: set[str] = set()\n self.live.start()", + "detail": "examples.financial_research_agent.printer", + "documentation": {} + }, + { + "label": "random_number_tool", + "kind": 2, + "importPath": "examples.handoffs.message_filter", + "description": "examples.handoffs.message_filter", + "peekOfCode": "def random_number_tool(max: int) -> int:\n \"\"\"Return a random integer between 0 and the given maximum.\"\"\"\n return random.randint(0, max)\ndef spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, tuple)", + "detail": "examples.handoffs.message_filter", + "documentation": {} + }, + { + "label": "spanish_handoff_message_filter", + "kind": 2, + "importPath": "examples.handoffs.message_filter", + "description": "examples.handoffs.message_filter", + "peekOfCode": "def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, 
tuple)\n else handoff_message_data.input_history\n )\n return HandoffInputData(", + "detail": "examples.handoffs.message_filter", + "documentation": {} + }, + { + "label": "first_agent", + "kind": 5, + "importPath": "examples.handoffs.message_filter", + "description": "examples.handoffs.message_filter", + "peekOfCode": "first_agent = Agent(\n name=\"Assistant\",\n instructions=\"Be extremely concise.\",\n tools=[random_number_tool],\n)\nspanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)", + "detail": "examples.handoffs.message_filter", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.handoffs.message_filter", + "description": "examples.handoffs.message_filter", + "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)\nsecond_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),", + "detail": "examples.handoffs.message_filter", + "documentation": {} + }, + { + "label": "second_agent", + "kind": 5, + "importPath": "examples.handoffs.message_filter", + "description": "examples.handoffs.message_filter", + "peekOfCode": "second_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),\n handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],\n)\nasync def main():\n # Trace the entire run as a single workflow\n with trace(workflow_name=\"Message filtering\"):", + "detail": "examples.handoffs.message_filter", + "documentation": {} + }, + { + "label": "random_number_tool", + "kind": 2, + "importPath": "examples.handoffs.message_filter_streaming", + "description": "examples.handoffs.message_filter_streaming", + "peekOfCode": "def random_number_tool(max: int) -> int:\n \"\"\"Return a random integer between 0 and the given maximum.\"\"\"\n return random.randint(0, max)\ndef spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, tuple)", + "detail": "examples.handoffs.message_filter_streaming", + "documentation": {} + }, + { + "label": "spanish_handoff_message_filter", + "kind": 2, + "importPath": "examples.handoffs.message_filter_streaming", + "description": "examples.handoffs.message_filter_streaming", + "peekOfCode": "def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, tuple)\n else handoff_message_data.input_history\n )\n return HandoffInputData(", + "detail": 
"examples.handoffs.message_filter_streaming", + "documentation": {} + }, + { + "label": "first_agent", + "kind": 5, + "importPath": "examples.handoffs.message_filter_streaming", + "description": "examples.handoffs.message_filter_streaming", + "peekOfCode": "first_agent = Agent(\n name=\"Assistant\",\n instructions=\"Be extremely concise.\",\n tools=[random_number_tool],\n)\nspanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)", + "detail": "examples.handoffs.message_filter_streaming", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.handoffs.message_filter_streaming", + "description": "examples.handoffs.message_filter_streaming", + "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)\nsecond_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),", + "detail": "examples.handoffs.message_filter_streaming", + "documentation": {} + }, + { + "label": "second_agent", + "kind": 5, + "importPath": "examples.handoffs.message_filter_streaming", + "description": "examples.handoffs.message_filter_streaming", + "peekOfCode": "second_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),\n handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],\n)\nasync def main():\n # Trace the entire run as a single workflow\n with trace(workflow_name=\"Streaming message filter\"):", + "detail": "examples.handoffs.message_filter_streaming", + "documentation": {} + }, + { + "label": "approval_callback", + "kind": 2, + "importPath": "examples.hosted_mcp.approvals", + "description": "examples.hosted_mcp.approvals", + "peekOfCode": "def approval_callback(request: MCPToolApprovalRequest) -> MCPToolApprovalFunctionResult:\n answer = input(f\"Approve running the tool `{request.data.name}`? 
(y/n) \")\n result: MCPToolApprovalFunctionResult = {\"approve\": answer == \"y\"}\n if not result[\"approve\"]:\n result[\"reason\"] = \"User denied\"\n return result\nasync def main(verbose: bool, stream: bool):\n agent = Agent(\n name=\"Assistant\",\n tools=[", + "detail": "examples.hosted_mcp.approvals", + "documentation": {} + }, + { + "label": "process_user_input", + "kind": 2, + "importPath": "examples.mcp.prompt_server.main", + "description": "examples.mcp.prompt_server.main", + "peekOfCode": "def process_user_input(user_input):\n command = f\"echo {user_input}\"\n os.system(command)\n return \"Command executed\"\n\"\"\"\n print(f\"Running: {message[:60]}...\")\n result = await Runner.run(starting_agent=agent, input=message)\n print(result.final_output)\n print(\"\\n\" + \"=\" * 50 + \"\\n\")\nasync def show_available_prompts(mcp_server: MCPServer):", + "detail": "examples.mcp.prompt_server.main", + "documentation": {} + }, + { + "label": "generate_code_review_instructions", + "kind": 2, + "importPath": "examples.mcp.prompt_server.server", + "description": "examples.mcp.prompt_server.server", + "peekOfCode": "def generate_code_review_instructions(\n focus: str = \"general code quality\", language: str = \"python\"\n) -> str:\n \"\"\"Generate agent instructions for code review tasks\"\"\"\n print(f\"[debug-server] generate_code_review_instructions({focus}, {language})\")\n return f\"\"\"You are a senior {language} code review specialist. Your role is to provide comprehensive code analysis with focus on {focus}.\nINSTRUCTIONS:\n- Analyze code for quality, security, performance, and best practices\n- Provide specific, actionable feedback with examples\n- Identify potential bugs, vulnerabilities, and optimization opportunities", + "detail": "examples.mcp.prompt_server.server", + "documentation": {} + }, + { + "label": "mcp", + "kind": 5, + "importPath": "examples.mcp.prompt_server.server", + "description": "examples.mcp.prompt_server.server", + "peekOfCode": "mcp = FastMCP(\"Prompt Server\")\n# Instruction-generating prompts (user-controlled)\n@mcp.prompt()\ndef generate_code_review_instructions(\n focus: str = \"general code quality\", language: str = \"python\"\n) -> str:\n \"\"\"Generate agent instructions for code review tasks\"\"\"\n print(f\"[debug-server] generate_code_review_instructions({focus}, {language})\")\n return f\"\"\"You are a senior {language} code review specialist. 
Your role is to provide comprehensive code analysis with focus on {focus}.\nINSTRUCTIONS:", + "detail": "examples.mcp.prompt_server.server", + "documentation": {} + }, + { + "label": "add", + "kind": 2, + "importPath": "examples.mcp.sse_example.server", + "description": "examples.mcp.sse_example.server", + "peekOfCode": "def add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:", + "detail": "examples.mcp.sse_example.server", + "documentation": {} + }, + { + "label": "get_secret_word", + "kind": 2, + "importPath": "examples.mcp.sse_example.server", + "description": "examples.mcp.sse_example.server", + "peekOfCode": "def get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif __name__ == \"__main__\":", + "detail": "examples.mcp.sse_example.server", + "documentation": {} + }, + { + "label": "get_current_weather", + "kind": 2, + "importPath": "examples.mcp.sse_example.server", + "description": "examples.mcp.sse_example.server", + "peekOfCode": "def get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif __name__ == \"__main__\":\n mcp.run(transport=\"sse\")", + "detail": "examples.mcp.sse_example.server", + "documentation": {} + }, + { + "label": "mcp", + "kind": 5, + "importPath": "examples.mcp.sse_example.server", + "description": "examples.mcp.sse_example.server", + "peekOfCode": "mcp = FastMCP(\"Echo Server\")\n@mcp.tool()\ndef add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])", + "detail": "examples.mcp.sse_example.server", + "documentation": {} + }, + { + "label": "add", + "kind": 2, + "importPath": "examples.mcp.streamablehttp_example.server", + "description": "examples.mcp.streamablehttp_example.server", + "peekOfCode": "def add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:", + "detail": "examples.mcp.streamablehttp_example.server", + "documentation": {} + }, + { + "label": "get_secret_word", + "kind": 2, + "importPath": "examples.mcp.streamablehttp_example.server", + "description": "examples.mcp.streamablehttp_example.server", + "peekOfCode": "def get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif 
__name__ == \"__main__\":", + "detail": "examples.mcp.streamablehttp_example.server", + "documentation": {} + }, + { + "label": "get_current_weather", + "kind": 2, + "importPath": "examples.mcp.streamablehttp_example.server", + "description": "examples.mcp.streamablehttp_example.server", + "peekOfCode": "def get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif __name__ == \"__main__\":\n mcp.run(transport=\"streamable-http\")", + "detail": "examples.mcp.streamablehttp_example.server", + "documentation": {} + }, + { + "label": "mcp", + "kind": 5, + "importPath": "examples.mcp.streamablehttp_example.server", + "description": "examples.mcp.streamablehttp_example.server", + "peekOfCode": "mcp = FastMCP(\"Echo Server\")\n@mcp.tool()\ndef add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])", + "detail": "examples.mcp.streamablehttp_example.server", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.model_providers.custom_example_agent", + "description": "examples.model_providers.custom_example_agent", + "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n # This agent will use the custom LLM provider\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),\n tools=[get_weather],", + "detail": "examples.model_providers.custom_example_agent", + "documentation": {} + }, + { + "label": "BASE_URL", + "kind": 5, + "importPath": "examples.model_providers.custom_example_agent", + "description": "examples.model_providers.custom_example_agent", + "peekOfCode": "BASE_URL = os.getenv(\"EXAMPLE_BASE_URL\") or \"\"\nAPI_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for a specific agent. Steps:\n1. Create a custom OpenAI client.\n2. Create a `Model` that uses the custom client.", + "detail": "examples.model_providers.custom_example_agent", + "documentation": {} + }, + { + "label": "API_KEY", + "kind": 5, + "importPath": "examples.model_providers.custom_example_agent", + "description": "examples.model_providers.custom_example_agent", + "peekOfCode": "API_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for a specific agent. Steps:\n1. Create a custom OpenAI client.\n2. Create a `Model` that uses the custom client.\n3. 
Set the `model` on the Agent.", + "detail": "examples.model_providers.custom_example_agent", + "documentation": {} + }, + { + "label": "MODEL_NAME", + "kind": 5, + "importPath": "examples.model_providers.custom_example_agent", + "description": "examples.model_providers.custom_example_agent", + "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for a specific agent. Steps:\n1. Create a custom OpenAI client.\n2. Create a `Model` that uses the custom client.\n3. Set the `model` on the Agent.\nNote that in this example, we disable tracing under the assumption that you don't have an API key", + "detail": "examples.model_providers.custom_example_agent", + "documentation": {} + }, + { + "label": "client", + "kind": 5, + "importPath": "examples.model_providers.custom_example_agent", + "description": "examples.model_providers.custom_example_agent", + "peekOfCode": "client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)\nset_tracing_disabled(disabled=True)\n# An alternate approach that would also work:\n# PROVIDER = OpenAIProvider(openai_client=client)\n# agent = Agent(..., model=\"some-custom-model\")\n# Runner.run(agent, ..., run_config=RunConfig(model_provider=PROVIDER))\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"", + "detail": "examples.model_providers.custom_example_agent", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.model_providers.custom_example_global", + "description": "examples.model_providers.custom_example_global", + "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n model=MODEL_NAME,\n tools=[get_weather],\n )", + "detail": "examples.model_providers.custom_example_global", + "documentation": {} + }, + { + "label": "BASE_URL", + "kind": 5, + "importPath": "examples.model_providers.custom_example_global", + "description": "examples.model_providers.custom_example_global", + "peekOfCode": "BASE_URL = os.getenv(\"EXAMPLE_BASE_URL\") or \"\"\nAPI_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for all requests by default. We do three things:\n1. Create a custom client.\n2. Set it as the default OpenAI client, and don't use it for tracing.", + "detail": "examples.model_providers.custom_example_global", + "documentation": {} + }, + { + "label": "API_KEY", + "kind": 5, + "importPath": "examples.model_providers.custom_example_global", + "description": "examples.model_providers.custom_example_global", + "peekOfCode": "API_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for all requests by default. 
We do three things:\n1. Create a custom client.\n2. Set it as the default OpenAI client, and don't use it for tracing.\n3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API.", + "detail": "examples.model_providers.custom_example_global", + "documentation": {} + }, + { + "label": "MODEL_NAME", + "kind": 5, + "importPath": "examples.model_providers.custom_example_global", + "description": "examples.model_providers.custom_example_global", + "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for all requests by default. We do three things:\n1. Create a custom client.\n2. Set it as the default OpenAI client, and don't use it for tracing.\n3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API.\nNote that in this example, we disable tracing under the assumption that you don't have an API key", + "detail": "examples.model_providers.custom_example_global", + "documentation": {} + }, + { + "label": "client", + "kind": 5, + "importPath": "examples.model_providers.custom_example_global", + "description": "examples.model_providers.custom_example_global", + "peekOfCode": "client = AsyncOpenAI(\n base_url=BASE_URL,\n api_key=API_KEY,\n)\nset_default_openai_client(client=client, use_for_tracing=False)\nset_default_openai_api(\"chat_completions\")\nset_tracing_disabled(disabled=True)\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")", + "detail": "examples.model_providers.custom_example_global", + "documentation": {} + }, + { + "label": "CustomModelProvider", + "kind": 6, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "class CustomModelProvider(ModelProvider):\n def get_model(self, model_name: str | None) -> Model:\n return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client)\nCUSTOM_MODEL_PROVIDER = CustomModelProvider()\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(name=\"Assistant\", instructions=\"You only respond in haikus.\", tools=[get_weather])", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(name=\"Assistant\", instructions=\"You only respond in haikus.\", tools=[get_weather])\n # This will use the custom model provider\n result = await Runner.run(\n agent,\n \"What's the weather in Tokyo?\",\n run_config=RunConfig(model_provider=CUSTOM_MODEL_PROVIDER),", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "BASE_URL", + "kind": 5, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "BASE_URL = os.getenv(\"EXAMPLE_BASE_URL\") or \"\"\nAPI_KEY 
= os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for\nothers. Steps:\n1. Create a custom OpenAI client.", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "API_KEY", + "kind": 5, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "API_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for\nothers. Steps:\n1. Create a custom OpenAI client.\n2. Create a ModelProvider that uses the custom client.", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "MODEL_NAME", + "kind": 5, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for\nothers. Steps:\n1. Create a custom OpenAI client.\n2. Create a ModelProvider that uses the custom client.\n3. 
Use the ModelProvider in calls to Runner.run(), only when we want to use the custom LLM provider.", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "client", + "kind": 5, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)\nset_tracing_disabled(disabled=True)\nclass CustomModelProvider(ModelProvider):\n def get_model(self, model_name: str | None) -> Model:\n return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client)\nCUSTOM_MODEL_PROVIDER = CustomModelProvider()\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "CUSTOM_MODEL_PROVIDER", + "kind": 5, + "importPath": "examples.model_providers.custom_example_provider", + "description": "examples.model_providers.custom_example_provider", + "peekOfCode": "CUSTOM_MODEL_PROVIDER = CustomModelProvider()\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(name=\"Assistant\", instructions=\"You only respond in haikus.\", tools=[get_weather])\n # This will use the custom model provider\n result = await Runner.run(\n agent,", + "detail": "examples.model_providers.custom_example_provider", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.model_providers.litellm_auto", + "description": "examples.model_providers.litellm_auto", + "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n # We prefix with litellm/ to tell the Runner to use the LitellmModel\n model=\"litellm/anthropic/claude-3-5-sonnet-20240620\",\n tools=[get_weather],", + "detail": "examples.model_providers.litellm_auto", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.model_providers.litellm_provider", + "description": "examples.model_providers.litellm_provider", + "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main(model: str, api_key: str):\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n model=LitellmModel(model=model, api_key=api_key),\n tools=[get_weather],\n )", + "detail": "examples.model_providers.litellm_provider", + "documentation": {} + }, + { + "label": "Example", + "kind": 6, + "importPath": "examples.realtime.demo", + "description": "examples.realtime.demo", + "peekOfCode": "class Example:\n def __init__(self) -> None:\n self.ui = AppUI()\n self.ui.connected = asyncio.Event()\n self.ui.last_audio_item_id = None\n # Set the audio callback\n self.ui.set_audio_callback(self.on_audio_recorded)\n self.session: RealtimeSession | None = None\n async def run(self) -> None:\n # Start UI in a separate task instead of waiting for it to complete", + "detail": "examples.realtime.demo", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.realtime.demo", + "description": 
"examples.realtime.demo", + "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"\n return f\"The weather in {city} is sunny.\"\nagent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:", + "detail": "examples.realtime.demo", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.realtime.demo", + "description": "examples.realtime.demo", + "peekOfCode": "agent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:\n return s[:max_length] + \"...\"\n return s\nclass Example:", + "detail": "examples.realtime.demo", + "documentation": {} + }, + { + "label": "NoUIDemo", + "kind": 6, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "class NoUIDemo:\n def __init__(self) -> None:\n self.session: RealtimeSession | None = None\n self.audio_stream: sd.InputStream | None = None\n self.audio_player: sd.OutputStream | None = None\n self.recording = False\n async def run(self) -> None:\n print(\"Connecting, may take a few seconds...\")\n # Initialize audio player\n self.audio_player = sd.OutputStream(", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"\n return f\"The weather in {city} is sunny.\"\nagent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "CHUNK_LENGTH_S", + "kind": 5, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "CHUNK_LENGTH_S = 0.05 # 50ms\nSAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# logger.logger.setLevel(logging.ERROR)\n@function_tool", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "SAMPLE_RATE", + "kind": 5, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "SAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# logger.logger.setLevel(logging.ERROR)\n@function_tool\ndef get_weather(city: str) -> str:", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "FORMAT", + "kind": 5, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "FORMAT = np.int16\nCHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# 
logger.logger.setLevel(logging.ERROR)\n@function_tool\ndef get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "CHANNELS", + "kind": 5, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "CHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# logger.logger.setLevel(logging.ERROR)\n@function_tool\ndef get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"\n return f\"The weather in {city} is sunny.\"", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.realtime.no_ui_demo", + "description": "examples.realtime.no_ui_demo", + "peekOfCode": "agent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:\n return s[:max_length] + \"...\"\n return s\nclass NoUIDemo:", + "detail": "examples.realtime.no_ui_demo", + "documentation": {} + }, + { + "label": "Header", + "kind": 6, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "class Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override\n def render(self) -> str:", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "AudioStatusIndicator", + "kind": 6, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "class AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override\n def render(self) -> str:\n status = (\n \"🔴 Conversation started.\"\n if self.is_recording\n else \"⚪ Press SPACE to start the conversation (q to quit)\"\n )", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "AppUI", + "kind": 6, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "class AppUI(App[None]):\n CSS = \"\"\"\n Screen {\n background: #1a1b26; /* Dark blue-grey background */\n }\n Container {\n border: double rgb(91, 164, 91);\n }\n #input-container {\n height: 5; /* Explicit height for input container */", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "CHUNK_LENGTH_S", + "kind": 5, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "CHUNK_LENGTH_S = 0.05 # 50ms\nSAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "SAMPLE_RATE", + "kind": 5, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "SAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass 
AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "FORMAT", + "kind": 5, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "FORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "CHANNELS", + "kind": 5, + "importPath": "examples.realtime.ui", + "description": "examples.realtime.ui", + "peekOfCode": "CHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override", + "detail": "examples.realtime.ui", + "documentation": {} + }, + { + "label": "MODEL_NAME", + "kind": 5, + "importPath": "examples.reasoning_content.main", + "description": "examples.reasoning_content.main", + "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"deepseek-reasoner\"\nasync def stream_with_reasoning_content():\n \"\"\"\n Example of streaming a response from a model that provides reasoning content.\n The reasoning content will be emitted as separate events.\n \"\"\"\n provider = OpenAIProvider()\n model = provider.get_model(MODEL_NAME)\n print(\"\\n=== Streaming Example ===\")\n print(\"Prompt: Write a haiku about recursion in programming\")", + "detail": "examples.reasoning_content.main", + "documentation": {} + }, + { + "label": "MODEL_NAME", + "kind": 5, + "importPath": "examples.reasoning_content.runner_example", + "description": "examples.reasoning_content.runner_example", + "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"deepseek-reasoner\"\nasync def main():\n print(f\"Using model: {MODEL_NAME}\")\n # Create an agent with a model that supports reasoning content\n agent = Agent(\n name=\"Reasoning Agent\",\n instructions=\"You are a helpful assistant that explains your reasoning step by step.\",\n model=MODEL_NAME,\n )\n # Example 1: Non-streaming response", + "detail": "examples.reasoning_content.runner_example", + "documentation": {} + }, + { + "label": "WebSearchItem", + "kind": 6, + "importPath": "examples.research_bot.agents.planner_agent", + "description": "examples.research_bot.agents.planner_agent", + "peekOfCode": "class WebSearchItem(BaseModel):\n reason: str\n \"Your reasoning for why this search is important to the query.\"\n query: str\n \"The search term to use for the web search.\"\nclass WebSearchPlan(BaseModel):\n searches: list[WebSearchItem]\n \"\"\"A list of web searches to perform to best answer the query.\"\"\"\nplanner_agent = Agent(\n name=\"PlannerAgent\",", + "detail": "examples.research_bot.agents.planner_agent", + "documentation": {} + }, + { + "label": "WebSearchPlan", + "kind": 6, + "importPath": "examples.research_bot.agents.planner_agent", + "description": "examples.research_bot.agents.planner_agent", + "peekOfCode": "class WebSearchPlan(BaseModel):\n searches: list[WebSearchItem]\n \"\"\"A list of web searches to perform to best answer the query.\"\"\"\nplanner_agent = Agent(\n name=\"PlannerAgent\",\n instructions=PROMPT,\n 
model=\"gpt-4o\",\n output_type=WebSearchPlan,\n)", + "detail": "examples.research_bot.agents.planner_agent", + "documentation": {} + }, + { + "label": "PROMPT", + "kind": 5, + "importPath": "examples.research_bot.agents.planner_agent", + "description": "examples.research_bot.agents.planner_agent", + "peekOfCode": "PROMPT = (\n \"You are a helpful research assistant. Given a query, come up with a set of web searches \"\n \"to perform to best answer the query. Output between 5 and 20 terms to query for.\"\n)\nclass WebSearchItem(BaseModel):\n reason: str\n \"Your reasoning for why this search is important to the query.\"\n query: str\n \"The search term to use for the web search.\"\nclass WebSearchPlan(BaseModel):", + "detail": "examples.research_bot.agents.planner_agent", + "documentation": {} + }, + { + "label": "planner_agent", + "kind": 5, + "importPath": "examples.research_bot.agents.planner_agent", + "description": "examples.research_bot.agents.planner_agent", + "peekOfCode": "planner_agent = Agent(\n name=\"PlannerAgent\",\n instructions=PROMPT,\n model=\"gpt-4o\",\n output_type=WebSearchPlan,\n)", + "detail": "examples.research_bot.agents.planner_agent", + "documentation": {} + }, + { + "label": "INSTRUCTIONS", + "kind": 5, + "importPath": "examples.research_bot.agents.search_agent", + "description": "examples.research_bot.agents.search_agent", + "peekOfCode": "INSTRUCTIONS = (\n \"You are a research assistant. Given a search term, you search the web for that term and \"\n \"produce a concise summary of the results. The summary must be 2-3 paragraphs and less than 300 \"\n \"words. Capture the main points. Write succinctly, no need to have complete sentences or good \"\n \"grammar. This will be consumed by someone synthesizing a report, so its vital you capture the \"\n \"essence and ignore any fluff. Do not include any additional commentary other than the summary \"\n \"itself.\"\n)\nsearch_agent = Agent(\n name=\"Search agent\",", + "detail": "examples.research_bot.agents.search_agent", + "documentation": {} + }, + { + "label": "search_agent", + "kind": 5, + "importPath": "examples.research_bot.agents.search_agent", + "description": "examples.research_bot.agents.search_agent", + "peekOfCode": "search_agent = Agent(\n name=\"Search agent\",\n instructions=INSTRUCTIONS,\n tools=[WebSearchTool()],\n model_settings=ModelSettings(tool_choice=\"required\"),\n)", + "detail": "examples.research_bot.agents.search_agent", + "documentation": {} + }, + { + "label": "ReportData", + "kind": 6, + "importPath": "examples.research_bot.agents.writer_agent", + "description": "examples.research_bot.agents.writer_agent", + "peekOfCode": "class ReportData(BaseModel):\n short_summary: str\n \"\"\"A short 2-3 sentence summary of the findings.\"\"\"\n markdown_report: str\n \"\"\"The final report\"\"\"\n follow_up_questions: list[str]\n \"\"\"Suggested topics to research further\"\"\"\nwriter_agent = Agent(\n name=\"WriterAgent\",\n instructions=PROMPT,", + "detail": "examples.research_bot.agents.writer_agent", + "documentation": {} + }, + { + "label": "PROMPT", + "kind": 5, + "importPath": "examples.research_bot.agents.writer_agent", + "description": "examples.research_bot.agents.writer_agent", + "peekOfCode": "PROMPT = (\n \"You are a senior researcher tasked with writing a cohesive report for a research query. 
\"\n \"You will be provided with the original query, and some initial research done by a research \"\n \"assistant.\\n\"\n \"You should first come up with an outline for the report that describes the structure and \"\n \"flow of the report. Then, generate the report and return that as your final output.\\n\"\n \"The final output should be in markdown format, and it should be lengthy and detailed. Aim \"\n \"for 5-10 pages of content, at least 1000 words.\"\n)\nclass ReportData(BaseModel):", + "detail": "examples.research_bot.agents.writer_agent", + "documentation": {} + }, + { + "label": "writer_agent", + "kind": 5, + "importPath": "examples.research_bot.agents.writer_agent", + "description": "examples.research_bot.agents.writer_agent", + "peekOfCode": "writer_agent = Agent(\n name=\"WriterAgent\",\n instructions=PROMPT,\n model=\"o3-mini\",\n output_type=ReportData,\n)", + "detail": "examples.research_bot.agents.writer_agent", + "documentation": {} + }, + { + "label": "ResearchManager", + "kind": 6, + "importPath": "examples.research_bot.manager", + "description": "examples.research_bot.manager", + "peekOfCode": "class ResearchManager:\n def __init__(self):\n self.console = Console()\n self.printer = Printer(self.console)\n async def run(self, query: str) -> None:\n trace_id = gen_trace_id()\n with trace(\"Research trace\", trace_id=trace_id):\n self.printer.update_item(\n \"trace_id\",\n f\"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\",", + "detail": "examples.research_bot.manager", + "documentation": {} + }, + { + "label": "Printer", + "kind": 6, + "importPath": "examples.research_bot.printer", + "description": "examples.research_bot.printer", + "peekOfCode": "class Printer:\n def __init__(self, console: Console):\n self.live = Live(console=console)\n self.items: dict[str, tuple[str, bool]] = {}\n self.hide_done_ids: set[str] = set()\n self.live.start()\n def end(self) -> None:\n self.live.stop()\n def hide_done_checkmark(self, item_id: str) -> None:\n self.hide_done_ids.add(item_id)", + "detail": "examples.research_bot.printer", + "documentation": {} + }, + { + "label": "LocalPlaywrightComputer", + "kind": 6, + "importPath": "examples.tools.computer_use", + "description": "examples.tools.computer_use", + "peekOfCode": "class LocalPlaywrightComputer(AsyncComputer):\n \"\"\"A computer, implemented using a local Playwright browser.\"\"\"\n def __init__(self):\n self._playwright: Union[Playwright, None] = None\n self._browser: Union[Browser, None] = None\n self._page: Union[Page, None] = None\n async def _get_browser_and_page(self) -> tuple[Browser, Page]:\n width, height = self.dimensions\n launch_args = [f\"--window-size={width},{height}\"]\n browser = await self.playwright.chromium.launch(headless=False, args=launch_args)", + "detail": "examples.tools.computer_use", + "documentation": {} + }, + { + "label": "CUA_KEY_TO_PLAYWRIGHT_KEY", + "kind": 5, + "importPath": "examples.tools.computer_use", + "description": "examples.tools.computer_use", + "peekOfCode": "CUA_KEY_TO_PLAYWRIGHT_KEY = {\n \"/\": \"Divide\",\n \"\\\\\": \"Backslash\",\n \"alt\": \"Alt\",\n \"arrowdown\": \"ArrowDown\",\n \"arrowleft\": \"ArrowLeft\",\n \"arrowright\": \"ArrowRight\",\n \"arrowup\": \"ArrowUp\",\n \"backspace\": \"Backspace\",\n \"capslock\": \"CapsLock\",", + "detail": "examples.tools.computer_use", + "documentation": {} + }, + { + "label": "open_file", + "kind": 2, + "importPath": "examples.tools.image_generator", + "description": "examples.tools.image_generator", + 
"peekOfCode": "def open_file(path: str) -> None:\n if sys.platform.startswith(\"darwin\"):\n subprocess.run([\"open\", path], check=False) # macOS\n elif os.name == \"nt\": # Windows\n os.astartfile(path) # type: ignore\n elif os.name == \"posix\":\n subprocess.run([\"xdg-open\", path], check=False) # Linux/Unix\n else:\n print(f\"Don't know how to open files on this platform: {sys.platform}\")\nasync def main():", + "detail": "examples.tools.image_generator", + "documentation": {} + }, + { + "label": "WorkflowCallbacks", + "kind": 6, + "importPath": "examples.voice.static.main", + "description": "examples.voice.static.main", + "peekOfCode": "class WorkflowCallbacks(SingleAgentWorkflowCallbacks):\n def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:\n print(f\"[debug] on_run called with transcription: {transcription}\")\nasync def main():\n pipeline = VoicePipeline(\n workflow=SingleAgentVoiceWorkflow(agent, callbacks=WorkflowCallbacks())\n )\n audio_input = AudioInput(buffer=record_audio())\n result = await pipeline.run(audio_input)\n with AudioPlayer() as player:", + "detail": "examples.voice.static.main", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.voice.static.main", + "description": "examples.voice.static.main", + "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather for a given city.\"\"\"\n print(f\"[debug] get_weather called with city: {city}\")\n choices = [\"sunny\", \"cloudy\", \"rainy\", \"snowy\"]\n return f\"The weather in {city} is {random.choice(choices)}.\"\nspanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. Speak in Spanish.\",", + "detail": "examples.voice.static.main", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.voice.static.main", + "description": "examples.voice.static.main", + "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. Speak in Spanish.\",\n ),\n model=\"gpt-4o-mini\",\n)\nagent = Agent(\n name=\"Assistant\",", + "detail": "examples.voice.static.main", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.voice.static.main", + "description": "examples.voice.static.main", + "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. 
If the user speaks in Spanish, handoff to the spanish agent.\",\n ),\n model=\"gpt-4o-mini\",\n handoffs=[spanish_agent],\n tools=[get_weather],\n)\nclass WorkflowCallbacks(SingleAgentWorkflowCallbacks):", + "detail": "examples.voice.static.main", + "documentation": {} + }, + { + "label": "AudioPlayer", + "kind": 6, + "importPath": "examples.voice.static.util", + "description": "examples.voice.static.util", + "peekOfCode": "class AudioPlayer:\n def __enter__(self):\n self.stream = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)\n self.stream.start()\n return self\n def __exit__(self, exc_type, exc_value, traceback):\n self.stream.stop() # wait for the stream to finish\n self.stream.close()\n def add_audio(self, audio_data: npt.NDArray[np.int16]):\n self.stream.write(audio_data)", + "detail": "examples.voice.static.util", + "documentation": {} + }, + { + "label": "record_audio", + "kind": 2, + "importPath": "examples.voice.static.util", + "description": "examples.voice.static.util", + "peekOfCode": "def record_audio():\n # Using curses to record audio in a way that:\n # - doesn't require accessibility permissions on macos\n # - doesn't block the terminal\n audio_data = curses.wrapper(_record_audio)\n return audio_data\nclass AudioPlayer:\n def __enter__(self):\n self.stream = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)\n self.stream.start()", + "detail": "examples.voice.static.util", + "documentation": {} + }, + { + "label": "Header", + "kind": 6, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "class Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "AudioStatusIndicator", + "kind": 6, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "class AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override\n def render(self) -> str:\n status = (\n \"🔴 Recording... (Press K to stop)\"\n if self.is_recording\n else \"⚪ Press K to start recording (Q to quit)\"\n )", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "RealtimeApp", + "kind": 6, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "class RealtimeApp(App[None]):\n CSS = \"\"\"\n Screen {\n background: #1a1b26; /* Dark blue-grey background */\n }\n Container {\n border: double rgb(91, 164, 91);\n }\n Horizontal {\n width: 100%;", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "CHUNK_LENGTH_S", + "kind": 5, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "CHUNK_LENGTH_S = 0.05 # 100ms\nSAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. 
When you stop speaking, it will respond.\"", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "SAMPLE_RATE", + "kind": 5, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "SAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "FORMAT", + "kind": 5, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "FORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "CHANNELS", + "kind": 5, + "importPath": "examples.voice.streamed.main", + "description": "examples.voice.streamed.main", + "peekOfCode": "CHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)", + "detail": "examples.voice.streamed.main", + "documentation": {} + }, + { + "label": "MyWorkflow", + "kind": 6, + "importPath": "examples.voice.streamed.my_workflow", + "description": "examples.voice.streamed.my_workflow", + "peekOfCode": "class MyWorkflow(VoiceWorkflowBase):\n def __init__(self, secret_word: str, on_start: Callable[[str], None]):\n \"\"\"\n Args:\n secret_word: The secret word to guess.\n on_start: A callback that is called when the workflow starts. The transcription\n is passed in as an argument.\n \"\"\"\n self._input_history: list[TResponseInputItem] = []\n self._current_agent = agent", + "detail": "examples.voice.streamed.my_workflow", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "examples.voice.streamed.my_workflow", + "description": "examples.voice.streamed.my_workflow", + "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather for a given city.\"\"\"\n print(f\"[debug] get_weather called with city: {city}\")\n choices = [\"sunny\", \"cloudy\", \"rainy\", \"snowy\"]\n return f\"The weather in {city} is {random.choice(choices)}.\"\nspanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. Speak in Spanish.\",", + "detail": "examples.voice.streamed.my_workflow", + "documentation": {} + }, + { + "label": "spanish_agent", + "kind": 5, + "importPath": "examples.voice.streamed.my_workflow", + "description": "examples.voice.streamed.my_workflow", + "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. 
Speak in Spanish.\",\n ),\n model=\"gpt-4o-mini\",\n)\nagent = Agent(\n name=\"Assistant\",", + "detail": "examples.voice.streamed.my_workflow", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "examples.voice.streamed.my_workflow", + "description": "examples.voice.streamed.my_workflow", + "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. If the user speaks in Spanish, handoff to the spanish agent.\",\n ),\n model=\"gpt-4o-mini\",\n handoffs=[spanish_agent],\n tools=[get_weather],\n)\nclass MyWorkflow(VoiceWorkflowBase):", + "detail": "examples.voice.streamed.my_workflow", + "documentation": {} + }, + { + "label": "LitellmModel", + "kind": 6, + "importPath": "src.agents.extensions.models.litellm_model", + "description": "src.agents.extensions.models.litellm_model", + "peekOfCode": "class LitellmModel(Model):\n \"\"\"This class enables using any model via LiteLLM. LiteLLM allows you to acess OpenAPI,\n Anthropic, Gemini, Mistral, and many other models.\n See supported models here: [litellm models](https://docs.litellm.ai/docs/providers).\n \"\"\"\n def __init__(\n self,\n model: str,\n base_url: str | None = None,\n api_key: str | None = None,", + "detail": "src.agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "LitellmConverter", + "kind": 6, + "importPath": "src.agents.extensions.models.litellm_model", + "description": "src.agents.extensions.models.litellm_model", + "peekOfCode": "class LitellmConverter:\n @classmethod\n def convert_message_to_openai(\n cls, message: litellm.types.utils.Message\n ) -> ChatCompletionMessage:\n if message.role != \"assistant\":\n raise ModelBehaviorError(f\"Unsupported role: {message.role}\")\n tool_calls = (\n [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]\n if message.tool_calls", + "detail": "src.agents.extensions.models.litellm_model", + "documentation": {} + }, + { + "label": "LitellmProvider", + "kind": 6, + "importPath": "src.agents.extensions.models.litellm_provider", + "description": "src.agents.extensions.models.litellm_provider", + "peekOfCode": "class LitellmProvider(ModelProvider):\n \"\"\"A ModelProvider that uses LiteLLM to route to any model provider. You can use it via:\n ```python\n Runner.run(agent, input, run_config=RunConfig(model_provider=LitellmProvider()))\n ```\n See supported models here: [litellm models](https://docs.litellm.ai/docs/providers).\n NOTE: API keys must be set via environment variables. If you're using models that require\n additional configuration (e.g. Azure API base or version), those must also be set via the\n environment variables that LiteLLM expects. 
If you have more advanced needs, we recommend\n copy-pasting this class and making any modifications you need.", + "detail": "src.agents.extensions.models.litellm_provider", + "documentation": {} + }, + { + "label": "remove_all_tools", + "kind": 2, + "importPath": "src.agents.extensions.handoff_filters", + "description": "src.agents.extensions.handoff_filters", + "peekOfCode": "def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData:\n \"\"\"Filters out all tool items: file search, web search and function calls+output.\"\"\"\n history = handoff_input_data.input_history\n new_items = handoff_input_data.new_items\n filtered_history = (\n _remove_tool_types_from_input(history) if isinstance(history, tuple) else history\n )\n filtered_pre_handoff_items = _remove_tools_from_items(handoff_input_data.pre_handoff_items)\n filtered_new_items = _remove_tools_from_items(new_items)\n return HandoffInputData(", + "detail": "src.agents.extensions.handoff_filters", + "documentation": {} + }, + { + "label": "prompt_with_handoff_instructions", + "kind": 2, + "importPath": "src.agents.extensions.handoff_prompt", + "description": "src.agents.extensions.handoff_prompt", + "peekOfCode": "def prompt_with_handoff_instructions(prompt: str) -> str:\n \"\"\"\n Add recommended instructions to the prompt for agents that use handoffs.\n \"\"\"\n return f\"{RECOMMENDED_PROMPT_PREFIX}\\n\\n{prompt}\"", + "detail": "src.agents.extensions.handoff_prompt", + "documentation": {} + }, + { + "label": "RECOMMENDED_PROMPT_PREFIX", + "kind": 5, + "importPath": "src.agents.extensions.handoff_prompt", + "description": "src.agents.extensions.handoff_prompt", + "peekOfCode": "RECOMMENDED_PROMPT_PREFIX = (\n \"# System context\\n\"\n \"You are part of a multi-agent system called the Agents SDK, designed to make agent \"\n \"coordination and execution easy. Agents uses two primary abstraction: **Agents** and \"\n \"**Handoffs**. An agent encompasses instructions and tools and can hand off a \"\n \"conversation to another agent when appropriate. \"\n \"Handoffs are achieved by calling a handoff function, generally named \"\n \"`transfer_to_`. 
Transfers between agents are handled seamlessly in the background;\"\n \" do not mention or draw attention to these transfers in your conversation with the user.\\n\"\n)", + "detail": "src.agents.extensions.handoff_prompt", + "documentation": {} + }, + { + "label": "get_main_graph", + "kind": 2, + "importPath": "src.agents.extensions.visualization", + "description": "src.agents.extensions.visualization", + "peekOfCode": "def get_main_graph(agent: Agent) -> str:\n \"\"\"\n Generates the main graph structure in DOT format for the given agent.\n Args:\n agent (Agent): The agent for which the graph is to be generated.\n Returns:\n str: The DOT format string representing the graph.\n \"\"\"\n parts = [\n \"\"\"", + "detail": "src.agents.extensions.visualization", + "documentation": {} + }, + { + "label": "get_all_nodes", + "kind": 2, + "importPath": "src.agents.extensions.visualization", + "description": "src.agents.extensions.visualization", + "peekOfCode": "def get_all_nodes(\n agent: Agent, parent: Agent | None = None, visited: set[str] | None = None\n) -> str:\n \"\"\"\n Recursively generates the nodes for the given agent and its handoffs in DOT format.\n Args:\n agent (Agent): The agent for which the nodes are to be generated.\n Returns:\n str: The DOT format string representing the nodes.\n \"\"\"", + "detail": "src.agents.extensions.visualization", + "documentation": {} + }, + { + "label": "get_all_edges", + "kind": 2, + "importPath": "src.agents.extensions.visualization", + "description": "src.agents.extensions.visualization", + "peekOfCode": "def get_all_edges(\n agent: Agent, parent: Agent | None = None, visited: set[str] | None = None\n) -> str:\n \"\"\"\n Recursively generates the edges for the given agent and its handoffs in DOT format.\n Args:\n agent (Agent): The agent for which the edges are to be generated.\n parent (Agent, optional): The parent agent. Defaults to None.\n Returns:\n str: The DOT format string representing the edges.", + "detail": "src.agents.extensions.visualization", + "documentation": {} + }, + { + "label": "draw_graph", + "kind": 2, + "importPath": "src.agents.extensions.visualization", + "description": "src.agents.extensions.visualization", + "peekOfCode": "def draw_graph(agent: Agent, filename: str | None = None) -> graphviz.Source:\n \"\"\"\n Draws the graph for the given agent and optionally saves it as a PNG file.\n Args:\n agent (Agent): The agent for which the graph is to be drawn.\n filename (str): The name of the file to save the graph as a PNG.\n Returns:\n graphviz.Source: The graphviz Source object representing the graph.\n \"\"\"\n dot_code = get_main_graph(agent)", + "detail": "src.agents.extensions.visualization", + "documentation": {} + }, + { + "label": "MCPServer", + "kind": 6, + "importPath": "src.agents.mcp.server", + "description": "src.agents.mcp.server", + "peekOfCode": "class MCPServer(abc.ABC):\n \"\"\"Base class for Model Context Protocol servers.\"\"\"\n @abc.abstractmethod\n async def connect(self):\n \"\"\"Connect to the server. For example, this might mean spawning a subprocess or\n opening a network connection. 
The server is expected to remain connected until\n `cleanup()` is called.\n \"\"\"\n pass\n @property", + "detail": "src.agents.mcp.server", + "documentation": {} + }, + { + "label": "_MCPServerWithClientSession", + "kind": 6, + "importPath": "src.agents.mcp.server", + "description": "src.agents.mcp.server", + "peekOfCode": "class _MCPServerWithClientSession(MCPServer, abc.ABC):\n \"\"\"Base class for MCP servers that use a `ClientSession` to communicate with the server.\"\"\"\n def __init__(\n self,\n cache_tools_list: bool,\n client_session_timeout_seconds: float | None,\n tool_filter: ToolFilter = None,\n ):\n \"\"\"\n Args:", + "detail": "src.agents.mcp.server", + "documentation": {} + }, + { + "label": "MCPServerStdioParams", + "kind": 6, + "importPath": "src.agents.mcp.server", + "description": "src.agents.mcp.server", + "peekOfCode": "class MCPServerStdioParams(TypedDict):\n \"\"\"Mirrors `mcp.client.stdio.StdioServerParameters`, but lets you pass params without another\n import.\n \"\"\"\n command: str\n \"\"\"The executable to run to start the server. For example, `python` or `node`.\"\"\"\n args: NotRequired[list[str]]\n \"\"\"Command line args to pass to the `command` executable. For example, `['foo.py']` or\n `['server.js', '--port', '8080']`.\"\"\"\n env: NotRequired[dict[str, str]]", + "detail": "src.agents.mcp.server", + "documentation": {} + }, + { + "label": "MCPServerStdio", + "kind": 6, + "importPath": "src.agents.mcp.server", + "description": "src.agents.mcp.server", + "peekOfCode": "class MCPServerStdio(_MCPServerWithClientSession):\n \"\"\"MCP server implementation that uses the stdio transport. See the [spec]\n (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio) for\n details.\n \"\"\"\n def __init__(\n self,\n params: MCPServerStdioParams,\n cache_tools_list: bool = False,\n name: str | None = None,", + "detail": "src.agents.mcp.server", + "documentation": {} + }, + { + "label": "MCPServerSseParams", + "kind": 6, + "importPath": "src.agents.mcp.server", + "description": "src.agents.mcp.server", + "peekOfCode": "class MCPServerSseParams(TypedDict):\n \"\"\"Mirrors the params in`mcp.client.sse.sse_client`.\"\"\"\n url: str\n \"\"\"The URL of the server.\"\"\"\n headers: NotRequired[dict[str, str]]\n \"\"\"The headers to send to the server.\"\"\"\n timeout: NotRequired[float]\n \"\"\"The timeout for the HTTP request. Defaults to 5 seconds.\"\"\"\n sse_read_timeout: NotRequired[float]\n \"\"\"The timeout for the SSE connection, in seconds. Defaults to 5 minutes.\"\"\"", + "detail": "src.agents.mcp.server", + "documentation": {} + }, + { + "label": "MCPServerSse", + "kind": 6, + "importPath": "src.agents.mcp.server", + "description": "src.agents.mcp.server", + "peekOfCode": "class MCPServerSse(_MCPServerWithClientSession):\n \"\"\"MCP server implementation that uses the HTTP with SSE transport. 
[Tooling residue: a machine-generated code-symbol index (JSON entries with "label", "kind", "importPath", "peekOfCode", "detail", and "documentation" fields) was embedded here. It enumerates symbols and docstring snippets from src.agents.mcp.server, src.agents.mcp.util, src.agents.memory.session, src.agents.models (_openai_shared, chatcmpl_converter, chatcmpl_helpers, chatcmpl_stream_handler, fake_id, interface, multi_provider, openai_chatcompletions, openai_provider, openai_responses), src.agents.realtime (agent, config, events, items, model, model_events, model_inputs, openai_realtime, runner, session), and src.agents.tracing.create. This index is not among the eight files in the patch's diffstat; only this summary of its coverage is kept.]
The span will not be started automatically, you should either\n do `with guardrail_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:", + "detail": "src.agents.tracing.create", + "documentation": {} + }, + { + "label": "transcription_span", + "kind": 2, + "importPath": "src.agents.tracing.create", + "description": "src.agents.tracing.create", + "peekOfCode": "def transcription_span(\n model: str | None = None,\n input: str | None = None,\n input_format: str | None = \"pcm\",\n output: str | None = None,\n model_config: Mapping[str, Any] | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[TranscriptionSpanData]:", + "detail": "src.agents.tracing.create", + "documentation": {} + }, + { + "label": "speech_span", + "kind": 2, + "importPath": "src.agents.tracing.create", + "description": "src.agents.tracing.create", + "peekOfCode": "def speech_span(\n model: str | None = None,\n input: str | None = None,\n output: str | None = None,\n output_format: str | None = \"pcm\",\n model_config: Mapping[str, Any] | None = None,\n first_content_at: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,", + "detail": "src.agents.tracing.create", + "documentation": {} + }, + { + "label": "speech_group_span", + "kind": 2, + "importPath": "src.agents.tracing.create", + "description": "src.agents.tracing.create", + "peekOfCode": "def speech_group_span(\n input: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[SpeechGroupSpanData]:\n \"\"\"Create a new speech group span. The span will not be started automatically, you should\n either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:\n input: The input text used for the speech request.", + "detail": "src.agents.tracing.create", + "documentation": {} + }, + { + "label": "mcp_tools_span", + "kind": 2, + "importPath": "src.agents.tracing.create", + "description": "src.agents.tracing.create", + "peekOfCode": "def mcp_tools_span(\n server: str | None = None,\n result: list[str] | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[MCPListToolsSpanData]:\n \"\"\"Create a new MCP list tools span. 
The span will not be started automatically, you should\n either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:", + "detail": "src.agents.tracing.create", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "src.agents.tracing.logger", + "description": "src.agents.tracing.logger", + "peekOfCode": "logger = logging.getLogger(\"openai.agents.tracing\")", + "detail": "src.agents.tracing.logger", + "documentation": {} + }, + { + "label": "TracingProcessor", + "kind": 6, + "importPath": "src.agents.tracing.processor_interface", + "description": "src.agents.tracing.processor_interface", + "peekOfCode": "class TracingProcessor(abc.ABC):\n \"\"\"Interface for processing spans.\"\"\"\n @abc.abstractmethod\n def on_trace_start(self, trace: \"Trace\") -> None:\n \"\"\"Called when a trace is started.\n Args:\n trace: The trace that started.\n \"\"\"\n pass\n @abc.abstractmethod", + "detail": "src.agents.tracing.processor_interface", + "documentation": {} + }, + { + "label": "TracingExporter", + "kind": 6, + "importPath": "src.agents.tracing.processor_interface", + "description": "src.agents.tracing.processor_interface", + "peekOfCode": "class TracingExporter(abc.ABC):\n \"\"\"Exports traces and spans. For example, could log them or send them to a backend.\"\"\"\n @abc.abstractmethod\n def export(self, items: list[\"Trace | Span[Any]\"]) -> None:\n \"\"\"Exports a list of traces and spans.\n Args:\n items: The items to export.\n \"\"\"\n pass", + "detail": "src.agents.tracing.processor_interface", + "documentation": {} + }, + { + "label": "ConsoleSpanExporter", + "kind": 6, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "class ConsoleSpanExporter(TracingExporter):\n \"\"\"Prints the traces and spans to the console.\"\"\"\n def export(self, items: list[Trace | Span[Any]]) -> None:\n for item in items:\n if isinstance(item, Trace):\n print(f\"[Exporter] Export trace_id={item.trace_id}, name={item.name}, \")\n else:\n print(f\"[Exporter] Export span: {item.export()}\")\nclass BackendSpanExporter(TracingExporter):\n def __init__(", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "BackendSpanExporter", + "kind": 6, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "class BackendSpanExporter(TracingExporter):\n def __init__(\n self,\n api_key: str | None = None,\n organization: str | None = None,\n project: str | None = None,\n endpoint: str = \"https://api.openai.com/v1/traces/ingest\",\n max_retries: int = 3,\n base_delay: float = 1.0,\n max_delay: float = 30.0,", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "BatchTraceProcessor", + "kind": 6, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "class BatchTraceProcessor(TracingProcessor):\n \"\"\"Some implementation notes:\n 1. Using Queue, which is thread-safe.\n 2. Using a background thread to export spans, to minimize any performance issues.\n 3. 
Spans are stored in memory until they are exported.\n \"\"\"\n def __init__(\n self,\n exporter: TracingExporter,\n max_queue_size: int = 8192,", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "default_exporter", + "kind": 2, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "def default_exporter() -> BackendSpanExporter:\n \"\"\"The default exporter, which exports traces and spans to the backend in batches.\"\"\"\n return _global_exporter\ndef default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "default_processor", + "kind": 2, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "def default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "_global_exporter", + "kind": 5, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "_global_exporter = BackendSpanExporter()\n_global_processor = BatchTraceProcessor(_global_exporter)\ndef default_exporter() -> BackendSpanExporter:\n \"\"\"The default exporter, which exports traces and spans to the backend in batches.\"\"\"\n return _global_exporter\ndef default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "_global_processor", + "kind": 5, + "importPath": "src.agents.tracing.processors", + "description": "src.agents.tracing.processors", + "peekOfCode": "_global_processor = BatchTraceProcessor(_global_exporter)\ndef default_exporter() -> BackendSpanExporter:\n \"\"\"The default exporter, which exports traces and spans to the backend in batches.\"\"\"\n return _global_exporter\ndef default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", + "detail": "src.agents.tracing.processors", + "documentation": {} + }, + { + "label": "SynchronousMultiTracingProcessor", + "kind": 6, + "importPath": "src.agents.tracing.provider", + "description": "src.agents.tracing.provider", + "peekOfCode": "class SynchronousMultiTracingProcessor(TracingProcessor):\n \"\"\"\n Forwards all calls to a list of TracingProcessors, in order of registration.\n \"\"\"\n def __init__(self):\n # Using a tuple to avoid race conditions when iterating over processors\n self._processors: tuple[TracingProcessor, ...] 
= ()\n self._lock = threading.Lock()\n def add_tracing_processor(self, tracing_processor: TracingProcessor):\n \"\"\"", + "detail": "src.agents.tracing.provider", + "documentation": {} + }, + { + "label": "TraceProvider", + "kind": 6, + "importPath": "src.agents.tracing.provider", + "description": "src.agents.tracing.provider", + "peekOfCode": "class TraceProvider(ABC):\n \"\"\"Interface for creating traces and spans.\"\"\"\n @abstractmethod\n def register_processor(self, processor: TracingProcessor) -> None:\n \"\"\"Add a processor that will receive all traces and spans.\"\"\"\n @abstractmethod\n def set_processors(self, processors: list[TracingProcessor]) -> None:\n \"\"\"Replace the list of processors with ``processors``.\"\"\"\n @abstractmethod\n def get_current_trace(self) -> Trace | None:", + "detail": "src.agents.tracing.provider", + "documentation": {} + }, + { + "label": "DefaultTraceProvider", + "kind": 6, + "importPath": "src.agents.tracing.provider", + "description": "src.agents.tracing.provider", + "peekOfCode": "class DefaultTraceProvider(TraceProvider):\n def __init__(self) -> None:\n self._multi_processor = SynchronousMultiTracingProcessor()\n self._disabled = os.environ.get(\"OPENAI_AGENTS_DISABLE_TRACING\", \"false\").lower() in (\n \"true\",\n \"1\",\n )\n def register_processor(self, processor: TracingProcessor):\n \"\"\"\n Add a processor to the list of processors. Each processor will receive all traces/spans.", + "detail": "src.agents.tracing.provider", + "documentation": {} + }, + { + "label": "Scope", + "kind": 6, + "importPath": "src.agents.tracing.scope", + "description": "src.agents.tracing.scope", + "peekOfCode": "class Scope:\n \"\"\"\n Manages the current span and trace in the context.\n \"\"\"\n @classmethod\n def get_current_span(cls) -> \"Span[Any] | None\":\n return _current_span.get()\n @classmethod\n def set_current_span(cls, span: \"Span[Any] | None\") -> \"contextvars.Token[Span[Any] | None]\":\n return _current_span.set(span)", + "detail": "src.agents.tracing.scope", + "documentation": {} + }, + { + "label": "set_trace_provider", + "kind": 2, + "importPath": "src.agents.tracing.setup", + "description": "src.agents.tracing.setup", + "peekOfCode": "def set_trace_provider(provider: TraceProvider) -> None:\n \"\"\"Set the global trace provider used by tracing utilities.\"\"\"\n global GLOBAL_TRACE_PROVIDER\n GLOBAL_TRACE_PROVIDER = provider\ndef get_trace_provider() -> TraceProvider:\n \"\"\"Get the global trace provider used by tracing utilities.\"\"\"\n if GLOBAL_TRACE_PROVIDER is None:\n raise RuntimeError(\"Trace provider not set\")\n return GLOBAL_TRACE_PROVIDER", + "detail": "src.agents.tracing.setup", + "documentation": {} + }, + { + "label": "get_trace_provider", + "kind": 2, + "importPath": "src.agents.tracing.setup", + "description": "src.agents.tracing.setup", + "peekOfCode": "def get_trace_provider() -> TraceProvider:\n \"\"\"Get the global trace provider used by tracing utilities.\"\"\"\n if GLOBAL_TRACE_PROVIDER is None:\n raise RuntimeError(\"Trace provider not set\")\n return GLOBAL_TRACE_PROVIDER", + "detail": "src.agents.tracing.setup", + "documentation": {} + }, + { + "label": "SpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class SpanData(abc.ABC):\n \"\"\"\n Represents span data in the trace.\n \"\"\"\n @abc.abstractmethod\n def export(self) -> dict[str, Any]:\n \"\"\"Export the span data as a dictionary.\"\"\"\n pass\n @property\n 
@abc.abstractmethod", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "AgentSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class AgentSpanData(SpanData):\n \"\"\"\n Represents an Agent Span in the trace.\n Includes name, handoffs, tools, and output type.\n \"\"\"\n __slots__ = (\"name\", \"handoffs\", \"tools\", \"output_type\")\n def __init__(\n self,\n name: str,\n handoffs: list[str] | None = None,", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "FunctionSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class FunctionSpanData(SpanData):\n \"\"\"\n Represents a Function Span in the trace.\n Includes input, output and MCP data (if applicable).\n \"\"\"\n __slots__ = (\"name\", \"input\", \"output\", \"mcp_data\")\n def __init__(\n self,\n name: str,\n input: str | None,", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "GenerationSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class GenerationSpanData(SpanData):\n \"\"\"\n Represents a Generation Span in the trace.\n Includes input, output, model, model configuration, and usage.\n \"\"\"\n __slots__ = (\n \"input\",\n \"output\",\n \"model\",\n \"model_config\",", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "ResponseSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class ResponseSpanData(SpanData):\n \"\"\"\n Represents a Response Span in the trace.\n Includes response and input.\n \"\"\"\n __slots__ = (\"response\", \"input\")\n def __init__(\n self,\n response: Response | None = None,\n input: str | list[ResponseInputItemParam] | None = None,", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "HandoffSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class HandoffSpanData(SpanData):\n \"\"\"\n Represents a Handoff Span in the trace.\n Includes source and destination agents.\n \"\"\"\n __slots__ = (\"from_agent\", \"to_agent\")\n def __init__(self, from_agent: str | None, to_agent: str | None):\n self.from_agent = from_agent\n self.to_agent = to_agent\n @property", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "CustomSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class CustomSpanData(SpanData):\n \"\"\"\n Represents a Custom Span in the trace.\n Includes name and data property bag.\n \"\"\"\n __slots__ = (\"name\", \"data\")\n def __init__(self, name: str, data: dict[str, Any]):\n self.name = name\n self.data = data\n @property", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "GuardrailSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class GuardrailSpanData(SpanData):\n \"\"\"\n Represents a Guardrail Span in the trace.\n Includes name and triggered status.\n \"\"\"\n __slots__ = (\"name\", \"triggered\")\n def __init__(self, name: str, triggered: bool = 
False):\n self.name = name\n self.triggered = triggered\n @property", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "TranscriptionSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class TranscriptionSpanData(SpanData):\n \"\"\"\n Represents a Transcription Span in the trace.\n Includes input, output, model, and model configuration.\n \"\"\"\n __slots__ = (\n \"input\",\n \"output\",\n \"model\",\n \"model_config\",", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "SpeechSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class SpeechSpanData(SpanData):\n \"\"\"\n Represents a Speech Span in the trace.\n Includes input, output, model, model configuration, and first content timestamp.\n \"\"\"\n __slots__ = (\"input\", \"output\", \"model\", \"model_config\", \"first_content_at\")\n def __init__(\n self,\n input: str | None = None,\n output: str | None = None,", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "SpeechGroupSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class SpeechGroupSpanData(SpanData):\n \"\"\"\n Represents a Speech Group Span in the trace.\n \"\"\"\n __slots__ = \"input\"\n def __init__(\n self,\n input: str | None = None,\n ):\n self.input = input", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "MCPListToolsSpanData", + "kind": 6, + "importPath": "src.agents.tracing.span_data", + "description": "src.agents.tracing.span_data", + "peekOfCode": "class MCPListToolsSpanData(SpanData):\n \"\"\"\n Represents an MCP List Tools Span in the trace.\n Includes server and result.\n \"\"\"\n __slots__ = (\n \"server\",\n \"result\",\n )\n def __init__(self, server: str | None = None, result: list[str] | None = None):", + "detail": "src.agents.tracing.span_data", + "documentation": {} + }, + { + "label": "SpanError", + "kind": 6, + "importPath": "src.agents.tracing.spans", + "description": "src.agents.tracing.spans", + "peekOfCode": "class SpanError(TypedDict):\n message: str\n data: dict[str, Any] | None\nclass Span(abc.ABC, Generic[TSpanData]):\n @property\n @abc.abstractmethod\n def trace_id(self) -> str:\n pass\n @property\n @abc.abstractmethod", + "detail": "src.agents.tracing.spans", + "documentation": {} + }, + { + "label": "Span", + "kind": 6, + "importPath": "src.agents.tracing.spans", + "description": "src.agents.tracing.spans", + "peekOfCode": "class Span(abc.ABC, Generic[TSpanData]):\n @property\n @abc.abstractmethod\n def trace_id(self) -> str:\n pass\n @property\n @abc.abstractmethod\n def span_id(self) -> str:\n pass\n @property", + "detail": "src.agents.tracing.spans", + "documentation": {} + }, + { + "label": "NoOpSpan", + "kind": 6, + "importPath": "src.agents.tracing.spans", + "description": "src.agents.tracing.spans", + "peekOfCode": "class NoOpSpan(Span[TSpanData]):\n __slots__ = (\"_span_data\", \"_prev_span_token\")\n def __init__(self, span_data: TSpanData):\n self._span_data = span_data\n self._prev_span_token: contextvars.Token[Span[TSpanData] | None] | None = None\n @property\n def trace_id(self) -> str:\n return \"no-op\"\n @property\n def span_id(self) -> str:", + "detail": "src.agents.tracing.spans", + "documentation": {} + }, + { + "label": 
"SpanImpl", + "kind": 6, + "importPath": "src.agents.tracing.spans", + "description": "src.agents.tracing.spans", + "peekOfCode": "class SpanImpl(Span[TSpanData]):\n __slots__ = (\n \"_trace_id\",\n \"_span_id\",\n \"_parent_id\",\n \"_started_at\",\n \"_ended_at\",\n \"_error\",\n \"_prev_span_token\",\n \"_processor\",", + "detail": "src.agents.tracing.spans", + "documentation": {} + }, + { + "label": "TSpanData", + "kind": 5, + "importPath": "src.agents.tracing.spans", + "description": "src.agents.tracing.spans", + "peekOfCode": "TSpanData = TypeVar(\"TSpanData\", bound=SpanData)\nclass SpanError(TypedDict):\n message: str\n data: dict[str, Any] | None\nclass Span(abc.ABC, Generic[TSpanData]):\n @property\n @abc.abstractmethod\n def trace_id(self) -> str:\n pass\n @property", + "detail": "src.agents.tracing.spans", + "documentation": {} + }, + { + "label": "Trace", + "kind": 6, + "importPath": "src.agents.tracing.traces", + "description": "src.agents.tracing.traces", + "peekOfCode": "class Trace:\n \"\"\"\n A trace is the root level object that tracing creates. It represents a logical \"workflow\".\n \"\"\"\n @abc.abstractmethod\n def __enter__(self) -> Trace:\n pass\n @abc.abstractmethod\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass", + "detail": "src.agents.tracing.traces", + "documentation": {} + }, + { + "label": "NoOpTrace", + "kind": 6, + "importPath": "src.agents.tracing.traces", + "description": "src.agents.tracing.traces", + "peekOfCode": "class NoOpTrace(Trace):\n \"\"\"\n A no-op trace that will not be recorded.\n \"\"\"\n def __init__(self):\n self._started = False\n self._prev_context_token: contextvars.Token[Trace | None] | None = None\n def __enter__(self) -> Trace:\n if self._started:\n if not self._prev_context_token:", + "detail": "src.agents.tracing.traces", + "documentation": {} + }, + { + "label": "TraceImpl", + "kind": 6, + "importPath": "src.agents.tracing.traces", + "description": "src.agents.tracing.traces", + "peekOfCode": "class TraceImpl(Trace):\n \"\"\"\n A trace that will be recorded by the tracing library.\n \"\"\"\n __slots__ = (\n \"_name\",\n \"_trace_id\",\n \"group_id\",\n \"metadata\",\n \"_prev_context_token\",", + "detail": "src.agents.tracing.traces", + "documentation": {} + }, + { + "label": "NO_OP_TRACE", + "kind": 5, + "importPath": "src.agents.tracing.traces", + "description": "src.agents.tracing.traces", + "peekOfCode": "NO_OP_TRACE = NoOpTrace()\nclass TraceImpl(Trace):\n \"\"\"\n A trace that will be recorded by the tracing library.\n \"\"\"\n __slots__ = (\n \"_name\",\n \"_trace_id\",\n \"group_id\",\n \"metadata\",", + "detail": "src.agents.tracing.traces", + "documentation": {} + }, + { + "label": "time_iso", + "kind": 2, + "importPath": "src.agents.tracing.util", + "description": "src.agents.tracing.util", + "peekOfCode": "def time_iso() -> str:\n \"\"\"Return the current time in ISO 8601 format.\"\"\"\n return get_trace_provider().time_iso()\ndef gen_trace_id() -> str:\n \"\"\"Generate a new trace ID.\"\"\"\n return get_trace_provider().gen_trace_id()\ndef gen_span_id() -> str:\n \"\"\"Generate a new span ID.\"\"\"\n return get_trace_provider().gen_span_id()\ndef gen_group_id() -> str:", + "detail": "src.agents.tracing.util", + "documentation": {} + }, + { + "label": "gen_trace_id", + "kind": 2, + "importPath": "src.agents.tracing.util", + "description": "src.agents.tracing.util", + "peekOfCode": "def gen_trace_id() -> str:\n \"\"\"Generate a new trace ID.\"\"\"\n return get_trace_provider().gen_trace_id()\ndef gen_span_id() 
-> str:\n \"\"\"Generate a new span ID.\"\"\"\n return get_trace_provider().gen_span_id()\ndef gen_group_id() -> str:\n \"\"\"Generate a new group ID.\"\"\"\n return get_trace_provider().gen_group_id()", + "detail": "src.agents.tracing.util", + "documentation": {} + }, + { + "label": "gen_span_id", + "kind": 2, + "importPath": "src.agents.tracing.util", + "description": "src.agents.tracing.util", + "peekOfCode": "def gen_span_id() -> str:\n \"\"\"Generate a new span ID.\"\"\"\n return get_trace_provider().gen_span_id()\ndef gen_group_id() -> str:\n \"\"\"Generate a new group ID.\"\"\"\n return get_trace_provider().gen_group_id()", + "detail": "src.agents.tracing.util", + "documentation": {} + }, + { + "label": "gen_group_id", + "kind": 2, + "importPath": "src.agents.tracing.util", + "description": "src.agents.tracing.util", + "peekOfCode": "def gen_group_id() -> str:\n \"\"\"Generate a new group ID.\"\"\"\n return get_trace_provider().gen_group_id()", + "detail": "src.agents.tracing.util", + "documentation": {} + }, + { + "label": "attach_error_to_span", + "kind": 2, + "importPath": "src.agents.util._error_tracing", + "description": "src.agents.util._error_tracing", + "peekOfCode": "def attach_error_to_span(span: Span[Any], error: SpanError) -> None:\n span.set_error(error)\ndef attach_error_to_current_span(error: SpanError) -> None:\n span = get_current_span()\n if span:\n attach_error_to_span(span, error)\n else:\n logger.warning(f\"No span to add error {error} to\")", + "detail": "src.agents.util._error_tracing", + "documentation": {} + }, + { + "label": "attach_error_to_current_span", + "kind": 2, + "importPath": "src.agents.util._error_tracing", + "description": "src.agents.util._error_tracing", + "peekOfCode": "def attach_error_to_current_span(error: SpanError) -> None:\n span = get_current_span()\n if span:\n attach_error_to_span(span, error)\n else:\n logger.warning(f\"No span to add error {error} to\")", + "detail": "src.agents.util._error_tracing", + "documentation": {} + }, + { + "label": "validate_json", + "kind": 2, + "importPath": "src.agents.util._json", + "description": "src.agents.util._json", + "peekOfCode": "def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:\n partial_setting: bool | Literal[\"off\", \"on\", \"trailing-strings\"] = (\n \"trailing-strings\" if partial else False\n )\n try:\n validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)\n return validated\n except ValidationError as e:\n attach_error_to_current_span(\n SpanError(", + "detail": "src.agents.util._json", + "documentation": {} + }, + { + "label": "T", + "kind": 5, + "importPath": "src.agents.util._json", + "description": "src.agents.util._json", + "peekOfCode": "T = TypeVar(\"T\")\ndef validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:\n partial_setting: bool | Literal[\"off\", \"on\", \"trailing-strings\"] = (\n \"trailing-strings\" if partial else False\n )\n try:\n validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)\n return validated\n except ValidationError as e:\n attach_error_to_current_span(", + "detail": "src.agents.util._json", + "documentation": {} + }, + { + "label": "pretty_print_result", + "kind": 2, + "importPath": "src.agents.util._pretty_print", + "description": "src.agents.util._pretty_print", + "peekOfCode": "def pretty_print_result(result: \"RunResult\") -> str:\n output = \"RunResult:\"\n output += f'\\n- Last agent: 
Agent(name=\"{result.last_agent.name}\", ...)'\n output += (\n f\"\\n- Final output ({type(result.final_output).__name__}):\\n\"\n f\"{_indent(_final_output_str(result), 2)}\"\n )\n output += f\"\\n- {len(result.new_items)} new item(s)\"\n output += f\"\\n- {len(result.raw_responses)} raw response(s)\"\n output += f\"\\n- {len(result.input_guardrail_results)} input guardrail result(s)\"", + "detail": "src.agents.util._pretty_print", + "documentation": {} + }, + { + "label": "pretty_print_run_error_details", + "kind": 2, + "importPath": "src.agents.util._pretty_print", + "description": "src.agents.util._pretty_print", + "peekOfCode": "def pretty_print_run_error_details(result: \"RunErrorDetails\") -> str:\n output = \"RunErrorDetails:\"\n output += f'\\n- Last agent: Agent(name=\"{result.last_agent.name}\", ...)'\n output += f\"\\n- {len(result.new_items)} new item(s)\"\n output += f\"\\n- {len(result.raw_responses)} raw response(s)\"\n output += f\"\\n- {len(result.input_guardrail_results)} input guardrail result(s)\"\n output += \"\\n(See `RunErrorDetails` for more details)\"\n return output\ndef pretty_print_run_result_streaming(result: \"RunResultStreaming\") -> str:\n output = \"RunResultStreaming:\"", + "detail": "src.agents.util._pretty_print", + "documentation": {} + }, + { + "label": "pretty_print_run_result_streaming", + "kind": 2, + "importPath": "src.agents.util._pretty_print", + "description": "src.agents.util._pretty_print", + "peekOfCode": "def pretty_print_run_result_streaming(result: \"RunResultStreaming\") -> str:\n output = \"RunResultStreaming:\"\n output += f'\\n- Current agent: Agent(name=\"{result.current_agent.name}\", ...)'\n output += f\"\\n- Current turn: {result.current_turn}\"\n output += f\"\\n- Max turns: {result.max_turns}\"\n output += f\"\\n- Is complete: {result.is_complete}\"\n output += (\n f\"\\n- Final output ({type(result.final_output).__name__}):\\n\"\n f\"{_indent(_final_output_str(result), 2)}\"\n )", + "detail": "src.agents.util._pretty_print", + "documentation": {} + }, + { + "label": "transform_string_function_style", + "kind": 2, + "importPath": "src.agents.util._transforms", + "description": "src.agents.util._transforms", + "peekOfCode": "def transform_string_function_style(name: str) -> str:\n # Replace spaces with underscores\n name = name.replace(\" \", \"_\")\n # Replace non-alphanumeric characters with underscores\n name = re.sub(r\"[^a-zA-Z0-9]\", \"_\", name)\n return name.lower()", + "detail": "src.agents.util._transforms", + "documentation": {} + }, + { + "label": "T", + "kind": 5, + "importPath": "src.agents.util._types", + "description": "src.agents.util._types", + "peekOfCode": "T = TypeVar(\"T\")\nMaybeAwaitable = Union[Awaitable[T], T]", + "detail": "src.agents.util._types", + "documentation": {} + }, + { + "label": "MaybeAwaitable", + "kind": 5, + "importPath": "src.agents.util._types", + "description": "src.agents.util._types", + "peekOfCode": "MaybeAwaitable = Union[Awaitable[T], T]", + "detail": "src.agents.util._types", + "documentation": {} + }, + { + "label": "OpenAIVoiceModelProvider", + "kind": 6, + "importPath": "src.agents.voice.models.openai_model_provider", + "description": "src.agents.voice.models.openai_model_provider", + "peekOfCode": "class OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,\n organization: str | None = 
None,\n project: str | None = None,", + "detail": "src.agents.voice.models.openai_model_provider", + "documentation": {} + }, + { + "label": "shared_http_client", + "kind": 2, + "importPath": "src.agents.voice.models.openai_model_provider", + "description": "src.agents.voice.models.openai_model_provider", + "peekOfCode": "def shared_http_client() -> httpx.AsyncClient:\n global _http_client\n if _http_client is None:\n _http_client = DefaultAsyncHttpxClient()\n return _http_client\nDEFAULT_STT_MODEL = \"gpt-4o-transcribe\"\nDEFAULT_TTS_MODEL = \"gpt-4o-mini-tts\"\nclass OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(", + "detail": "src.agents.voice.models.openai_model_provider", + "documentation": {} + }, + { + "label": "DEFAULT_STT_MODEL", + "kind": 5, + "importPath": "src.agents.voice.models.openai_model_provider", + "description": "src.agents.voice.models.openai_model_provider", + "peekOfCode": "DEFAULT_STT_MODEL = \"gpt-4o-transcribe\"\nDEFAULT_TTS_MODEL = \"gpt-4o-mini-tts\"\nclass OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,", + "detail": "src.agents.voice.models.openai_model_provider", + "documentation": {} + }, + { + "label": "DEFAULT_TTS_MODEL", + "kind": 5, + "importPath": "src.agents.voice.models.openai_model_provider", + "description": "src.agents.voice.models.openai_model_provider", + "peekOfCode": "DEFAULT_TTS_MODEL = \"gpt-4o-mini-tts\"\nclass OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,\n organization: str | None = None,", + "detail": "src.agents.voice.models.openai_model_provider", + "documentation": {} + }, + { + "label": "ErrorSentinel", + "kind": 6, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "class ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n concatenated_audio = np.concatenate(audio_data)\n if concatenated_audio.dtype == np.float32:\n # convert to int16", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "SessionCompleteSentinel", + "kind": 6, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "class SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n concatenated_audio = np.concatenate(audio_data)\n if concatenated_audio.dtype == np.float32:\n # convert to int16\n concatenated_audio = np.clip(concatenated_audio, -1.0, 1.0)\n concatenated_audio = (concatenated_audio * 32767).astype(np.int16)", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "WebsocketDoneSentinel", + "kind": 6, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "class WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n 
concatenated_audio = np.concatenate(audio_data)\n if concatenated_audio.dtype == np.float32:\n # convert to int16\n concatenated_audio = np.clip(concatenated_audio, -1.0, 1.0)\n concatenated_audio = (concatenated_audio * 32767).astype(np.int16)\n audio_bytes = concatenated_audio.tobytes()\n return base64.b64encode(audio_bytes).decode(\"utf-8\")", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "OpenAISTTTranscriptionSession", + "kind": 6, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):\n \"\"\"A transcription session for OpenAI's STT model.\"\"\"\n def __init__(\n self,\n input: StreamedAudioInput,\n client: AsyncOpenAI,\n model: str,\n settings: STTModelSettings,\n trace_include_sensitive_data: bool,\n trace_include_sensitive_audio_data: bool,", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "OpenAISTTModel", + "kind": 6, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "class OpenAISTTModel(STTModel):\n \"\"\"A speech-to-text model for OpenAI.\"\"\"\n def __init__(\n self,\n model: str,\n openai_client: AsyncOpenAI,\n ):\n \"\"\"Create a new OpenAI speech-to-text model.\n Args:\n model: The name of the model to use.", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "EVENT_INACTIVITY_TIMEOUT", + "kind": 5, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "EVENT_INACTIVITY_TIMEOUT = 1000 # Timeout for inactivity in event processing\nSESSION_CREATION_TIMEOUT = 10 # Timeout waiting for session.created event\nSESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event\nDEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "SESSION_CREATION_TIMEOUT", + "kind": 5, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "SESSION_CREATION_TIMEOUT = 10 # Timeout waiting for session.created event\nSESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event\nDEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "SESSION_UPDATE_TIMEOUT", + "kind": 5, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + "peekOfCode": "SESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event\nDEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "DEFAULT_TURN_DETECTION", + "kind": 5, + "importPath": "src.agents.voice.models.openai_stt", + "description": "src.agents.voice.models.openai_stt", + 
"peekOfCode": "DEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n concatenated_audio = np.concatenate(audio_data)", + "detail": "src.agents.voice.models.openai_stt", + "documentation": {} + }, + { + "label": "OpenAITTSModel", + "kind": 6, + "importPath": "src.agents.voice.models.openai_tts", + "description": "src.agents.voice.models.openai_tts", + "peekOfCode": "class OpenAITTSModel(TTSModel):\n \"\"\"A text-to-speech model for OpenAI.\"\"\"\n def __init__(\n self,\n model: str,\n openai_client: AsyncOpenAI,\n ):\n \"\"\"Create a new OpenAI text-to-speech model.\n Args:\n model: The name of the model to use.", + "detail": "src.agents.voice.models.openai_tts", + "documentation": {} + }, + { + "label": "VoiceStreamEventAudio", + "kind": 6, + "importPath": "src.agents.voice.events", + "description": "src.agents.voice.events", + "peekOfCode": "class VoiceStreamEventAudio:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n data: npt.NDArray[np.int16 | np.float32] | None\n \"\"\"The audio data.\"\"\"\n type: Literal[\"voice_stream_event_audio\"] = \"voice_stream_event_audio\"\n \"\"\"The type of event.\"\"\"\n@dataclass\nclass VoiceStreamEventLifecycle:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n event: Literal[\"turn_started\", \"turn_ended\", \"session_ended\"]", + "detail": "src.agents.voice.events", + "documentation": {} + }, + { + "label": "VoiceStreamEventLifecycle", + "kind": 6, + "importPath": "src.agents.voice.events", + "description": "src.agents.voice.events", + "peekOfCode": "class VoiceStreamEventLifecycle:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n event: Literal[\"turn_started\", \"turn_ended\", \"session_ended\"]\n \"\"\"The event that occurred.\"\"\"\n type: Literal[\"voice_stream_event_lifecycle\"] = \"voice_stream_event_lifecycle\"\n \"\"\"The type of event.\"\"\"\n@dataclass\nclass VoiceStreamEventError:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n error: Exception", + "detail": "src.agents.voice.events", + "documentation": {} + }, + { + "label": "VoiceStreamEventError", + "kind": 6, + "importPath": "src.agents.voice.events", + "description": "src.agents.voice.events", + "peekOfCode": "class VoiceStreamEventError:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n error: Exception\n \"\"\"The error that occurred.\"\"\"\n type: Literal[\"voice_stream_event_error\"] = \"voice_stream_event_error\"\n \"\"\"The type of event.\"\"\"\nVoiceStreamEvent: TypeAlias = Union[\n VoiceStreamEventAudio, VoiceStreamEventLifecycle, VoiceStreamEventError\n]\n\"\"\"An event from the `VoicePipeline`, streamed via `StreamedAudioResult.stream()`.\"\"\"", + "detail": "src.agents.voice.events", + "documentation": {} + }, + { + "label": "STTWebsocketConnectionError", + "kind": 6, + "importPath": "src.agents.voice.exceptions", + "description": "src.agents.voice.exceptions", + "peekOfCode": "class STTWebsocketConnectionError(AgentsException):\n \"\"\"Exception raised when the STT websocket connection fails.\"\"\"\n def __init__(self, message: str):\n self.message = message", + "detail": "src.agents.voice.exceptions", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "src.agents.voice.imports", + "description": "src.agents.voice.imports", + "peekOfCode": "__all__ = [\"np\", \"npt\", \"websockets\"]", + "detail": 
"src.agents.voice.imports", + "documentation": {} + }, + { + "label": "AudioInput", + "kind": 6, + "importPath": "src.agents.voice.input", + "description": "src.agents.voice.input", + "peekOfCode": "class AudioInput:\n \"\"\"Static audio to be used as input for the VoicePipeline.\"\"\"\n buffer: npt.NDArray[np.int16 | np.float32]\n \"\"\"\n A buffer containing the audio data for the agent. Must be a numpy array of int16 or float32.\n \"\"\"\n frame_rate: int = DEFAULT_SAMPLE_RATE\n \"\"\"The sample rate of the audio data. Defaults to 24000.\"\"\"\n sample_width: int = 2\n \"\"\"The sample width of the audio data. Defaults to 2.\"\"\"", + "detail": "src.agents.voice.input", + "documentation": {} + }, + { + "label": "StreamedAudioInput", + "kind": 6, + "importPath": "src.agents.voice.input", + "description": "src.agents.voice.input", + "peekOfCode": "class StreamedAudioInput:\n \"\"\"Audio input represented as a stream of audio data. You can pass this to the `VoicePipeline`\n and then push audio data into the queue using the `add_audio` method.\n \"\"\"\n def __init__(self):\n self.queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = asyncio.Queue()\n async def add_audio(self, audio: npt.NDArray[np.int16 | np.float32]):\n \"\"\"Adds more audio data to the stream.\n Args:\n audio: The audio data to add. Must be a numpy array of int16 or float32.", + "detail": "src.agents.voice.input", + "documentation": {} + }, + { + "label": "DEFAULT_SAMPLE_RATE", + "kind": 5, + "importPath": "src.agents.voice.input", + "description": "src.agents.voice.input", + "peekOfCode": "DEFAULT_SAMPLE_RATE = 24000\ndef _buffer_to_audio_file(\n buffer: npt.NDArray[np.int16 | np.float32],\n frame_rate: int = DEFAULT_SAMPLE_RATE,\n sample_width: int = 2,\n channels: int = 1,\n) -> tuple[str, io.BytesIO, str]:\n if buffer.dtype == np.float32:\n # convert to int16\n buffer = np.clip(buffer, -1.0, 1.0)", + "detail": "src.agents.voice.input", + "documentation": {} + }, + { + "label": "TTSModelSettings", + "kind": 6, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "class TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None\n \"\"\"\n The voice to use for the TTS model. If not provided, the default voice for the respective model\n will be used.\n \"\"\"\n buffer_size: int = 120\n \"\"\"The minimal size of the chunks of audio data that are being streamed out.\"\"\"\n dtype: npt.DTypeLike = np.int16", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "TTSModel", + "kind": 6, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "class TTSModel(abc.ABC):\n \"\"\"A text-to-speech model that can convert text into audio output.\"\"\"\n @property\n @abc.abstractmethod\n def model_name(self) -> str:\n \"\"\"The name of the TTS model.\"\"\"\n pass\n @abc.abstractmethod\n def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]:\n \"\"\"Given a text string, produces a stream of audio bytes, in PCM format.", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "StreamedTranscriptionSession", + "kind": 6, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "class StreamedTranscriptionSession(abc.ABC):\n \"\"\"A streamed transcription of audio input.\"\"\"\n @abc.abstractmethod\n def transcribe_turns(self) -> AsyncIterator[str]:\n \"\"\"Yields a stream of text transcriptions. 
Each transcription is a turn in the conversation.\n This method is expected to return only after `close()` is called.\n \"\"\"\n pass\n @abc.abstractmethod\n async def close(self) -> None:", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "STTModelSettings", + "kind": 6, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "class STTModelSettings:\n \"\"\"Settings for a speech-to-text model.\"\"\"\n prompt: str | None = None\n \"\"\"Instructions for the model to follow.\"\"\"\n language: str | None = None\n \"\"\"The language of the audio input.\"\"\"\n temperature: float | None = None\n \"\"\"The temperature of the model.\"\"\"\n turn_detection: dict[str, Any] | None = None\n \"\"\"The turn detection settings for the model when using streamed audio input.\"\"\"", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "STTModel", + "kind": 6, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "class STTModel(abc.ABC):\n \"\"\"A speech-to-text model that can convert audio input into text.\"\"\"\n @property\n @abc.abstractmethod\n def model_name(self) -> str:\n \"\"\"The name of the STT model.\"\"\"\n pass\n @abc.abstractmethod\n async def transcribe(\n self,", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "VoiceModelProvider", + "kind": 6, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "class VoiceModelProvider(abc.ABC):\n \"\"\"The base interface for a voice model provider.\n A model provider is responsible for creating speech-to-text and text-to-speech models, given a\n name.\n \"\"\"\n @abc.abstractmethod\n def get_stt_model(self, model_name: str | None) -> STTModel:\n \"\"\"Get a speech-to-text model by name.\n Args:\n model_name: The name of the model to get.", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "DEFAULT_TTS_INSTRUCTIONS", + "kind": 5, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "DEFAULT_TTS_INSTRUCTIONS = (\n \"You will receive partial sentences. Do not complete the sentence, just read out the text.\"\n)\nDEFAULT_TTS_BUFFER_SIZE = 120\nTTSVoice = Literal[\"alloy\", \"ash\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\"]\n\"\"\"Exportable type for the TTSModelSettings voice enum\"\"\"\n@dataclass\nclass TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "DEFAULT_TTS_BUFFER_SIZE", + "kind": 5, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "DEFAULT_TTS_BUFFER_SIZE = 120\nTTSVoice = Literal[\"alloy\", \"ash\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\"]\n\"\"\"Exportable type for the TTSModelSettings voice enum\"\"\"\n@dataclass\nclass TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None\n \"\"\"\n The voice to use for the TTS model. 
If not provided, the default voice for the respective model\n will be used.", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "TTSVoice", + "kind": 5, + "importPath": "src.agents.voice.model", + "description": "src.agents.voice.model", + "peekOfCode": "TTSVoice = Literal[\"alloy\", \"ash\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\"]\n\"\"\"Exportable type for the TTSModelSettings voice enum\"\"\"\n@dataclass\nclass TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None\n \"\"\"\n The voice to use for the TTS model. If not provided, the default voice for the respective model\n will be used.\n \"\"\"", + "detail": "src.agents.voice.model", + "documentation": {} + }, + { + "label": "VoicePipeline", + "kind": 6, + "importPath": "src.agents.voice.pipeline", + "description": "src.agents.voice.pipeline", + "peekOfCode": "class VoicePipeline:\n \"\"\"An opinionated voice agent pipeline. It works in three steps:\n 1. Transcribe audio input into text.\n 2. Run the provided `workflow`, which produces a sequence of text responses.\n 3. Convert the text responses into streaming audio output.\n \"\"\"\n def __init__(\n self,\n *,\n workflow: VoiceWorkflowBase,", + "detail": "src.agents.voice.pipeline", + "documentation": {} + }, + { + "label": "VoicePipelineConfig", + "kind": 6, + "importPath": "src.agents.voice.pipeline_config", + "description": "src.agents.voice.pipeline_config", + "peekOfCode": "class VoicePipelineConfig:\n \"\"\"Configuration for a `VoicePipeline`.\"\"\"\n model_provider: VoiceModelProvider = field(default_factory=OpenAIVoiceModelProvider)\n \"\"\"The voice model provider to use for the pipeline. Defaults to OpenAI.\"\"\"\n tracing_disabled: bool = False\n \"\"\"Whether to disable tracing of the pipeline. Defaults to `False`.\"\"\"\n trace_include_sensitive_data: bool = True\n \"\"\"Whether to include sensitive data in traces. Defaults to `True`. This is specifically for the\n voice pipeline, and not for anything that goes on inside your Workflow.\"\"\"\n trace_include_sensitive_audio_data: bool = True", + "detail": "src.agents.voice.pipeline_config", + "documentation": {} + }, + { + "label": "StreamedAudioResult", + "kind": 6, + "importPath": "src.agents.voice.result", + "description": "src.agents.voice.result", + "peekOfCode": "class StreamedAudioResult:\n \"\"\"The output of a `VoicePipeline`. 
Streams events and audio data as they're generated.\"\"\"\n def __init__(\n self,\n tts_model: TTSModel,\n tts_settings: TTSModelSettings,\n voice_pipeline_config: VoicePipelineConfig,\n ):\n \"\"\"Create a new `StreamedAudioResult` instance.\n Args:", + "detail": "src.agents.voice.result", + "documentation": {} + }, + { + "label": "get_sentence_based_splitter", + "kind": 2, + "importPath": "src.agents.voice.utils", + "description": "src.agents.voice.utils", + "peekOfCode": "def get_sentence_based_splitter(\n min_sentence_length: int = 20,\n) -> Callable[[str], tuple[str, str]]:\n \"\"\"Returns a function that splits text into chunks based on sentence boundaries.\n Args:\n min_sentence_length: The minimum length of a sentence to be included in a chunk.\n Returns:\n A function that splits text into chunks based on sentence boundaries.\n \"\"\"\n def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]:", + "detail": "src.agents.voice.utils", + "documentation": {} + }, + { + "label": "VoiceWorkflowBase", + "kind": 6, + "importPath": "src.agents.voice.workflow", + "description": "src.agents.voice.workflow", + "peekOfCode": "class VoiceWorkflowBase(abc.ABC):\n \"\"\"\n A base class for a voice workflow. You must implement the `run` method. A \"workflow\" is any\n code you want, that receives a transcription and yields text that will be turned into speech\n by a text-to-speech model.\n In most cases, you'll create `Agent`s and use `Runner.run_streamed()` to run them, returning\n some or all of the text events from the stream. You can use the `VoiceWorkflowHelper` class to\n help with extracting text events from the stream.\n If you have a simple workflow that has a single starting agent and no custom logic, you can\n use `SingleAgentVoiceWorkflow` directly.", + "detail": "src.agents.voice.workflow", + "documentation": {} + }, + { + "label": "VoiceWorkflowHelper", + "kind": 6, + "importPath": "src.agents.voice.workflow", + "description": "src.agents.voice.workflow", + "peekOfCode": "class VoiceWorkflowHelper:\n @classmethod\n async def stream_text_from(cls, result: RunResultStreaming) -> AsyncIterator[str]:\n \"\"\"Wraps a `RunResultStreaming` object and yields text events from the stream.\"\"\"\n async for event in result.stream_events():\n if (\n event.type == \"raw_response_event\"\n and event.data.type == \"response.output_text.delta\"\n ):\n yield event.data.delta", + "detail": "src.agents.voice.workflow", + "documentation": {} + }, + { + "label": "SingleAgentWorkflowCallbacks", + "kind": 6, + "importPath": "src.agents.voice.workflow", + "description": "src.agents.voice.workflow", + "peekOfCode": "class SingleAgentWorkflowCallbacks:\n def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:\n \"\"\"Called when the workflow is run.\"\"\"\n pass\nclass SingleAgentVoiceWorkflow(VoiceWorkflowBase):\n \"\"\"A simple voice workflow that runs a single agent. Each transcription and result is added to\n the input history.\n For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic,\n custom configs), subclass `VoiceWorkflowBase` and implement your own logic.\n \"\"\"", + "detail": "src.agents.voice.workflow", + "documentation": {} + }, + { + "label": "SingleAgentVoiceWorkflow", + "kind": 6, + "importPath": "src.agents.voice.workflow", + "description": "src.agents.voice.workflow", + "peekOfCode": "class SingleAgentVoiceWorkflow(VoiceWorkflowBase):\n \"\"\"A simple voice workflow that runs a single agent. 
Each transcription and result is added to\n the input history.\n For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic,\n custom configs), subclass `VoiceWorkflowBase` and implement your own logic.\n \"\"\"\n def __init__(self, agent: Agent[Any], callbacks: SingleAgentWorkflowCallbacks | None = None):\n \"\"\"Create a new single agent voice workflow.\n Args:\n agent: The agent to run.", + "detail": "src.agents.voice.workflow", + "documentation": {} + }, + { + "label": "set_default_openai_key", + "kind": 2, + "importPath": "src.agents._config", + "description": "src.agents._config", + "peekOfCode": "def set_default_openai_key(key: str, use_for_tracing: bool) -> None:\n _openai_shared.set_default_openai_key(key)\n if use_for_tracing:\n set_tracing_export_api_key(key)\ndef set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:\n _openai_shared.set_default_openai_client(client)\n if use_for_tracing:\n set_tracing_export_api_key(client.api_key)\ndef set_default_openai_api(api: Literal[\"chat_completions\", \"responses\"]) -> None:\n if api == \"chat_completions\":", + "detail": "src.agents._config", + "documentation": {} + }, + { + "label": "set_default_openai_client", + "kind": 2, + "importPath": "src.agents._config", + "description": "src.agents._config", + "peekOfCode": "def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:\n _openai_shared.set_default_openai_client(client)\n if use_for_tracing:\n set_tracing_export_api_key(client.api_key)\ndef set_default_openai_api(api: Literal[\"chat_completions\", \"responses\"]) -> None:\n if api == \"chat_completions\":\n _openai_shared.set_use_responses_by_default(False)\n else:\n _openai_shared.set_use_responses_by_default(True)", + "detail": "src.agents._config", + "documentation": {} + }, + { + "label": "set_default_openai_api", + "kind": 2, + "importPath": "src.agents._config", + "description": "src.agents._config", + "peekOfCode": "def set_default_openai_api(api: Literal[\"chat_completions\", \"responses\"]) -> None:\n if api == \"chat_completions\":\n _openai_shared.set_use_responses_by_default(False)\n else:\n _openai_shared.set_use_responses_by_default(True)", + "detail": "src.agents._config", + "documentation": {} + }, + { + "label": "DONT_LOG_MODEL_DATA", + "kind": 5, + "importPath": "src.agents._debug", + "description": "src.agents._debug", + "peekOfCode": "DONT_LOG_MODEL_DATA = _debug_flag_enabled(\"OPENAI_AGENTS_DONT_LOG_MODEL_DATA\")\n\"\"\"By default we don't log LLM inputs/outputs, to prevent exposing sensitive information. Set this\nflag to enable logging them.\n\"\"\"\nDONT_LOG_TOOL_DATA = _debug_flag_enabled(\"OPENAI_AGENTS_DONT_LOG_TOOL_DATA\")\n\"\"\"By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. Set\nthis flag to enable logging them.\n\"\"\"", + "detail": "src.agents._debug", + "documentation": {} + }, + { + "label": "DONT_LOG_TOOL_DATA", + "kind": 5, + "importPath": "src.agents._debug", + "description": "src.agents._debug", + "peekOfCode": "DONT_LOG_TOOL_DATA = _debug_flag_enabled(\"OPENAI_AGENTS_DONT_LOG_TOOL_DATA\")\n\"\"\"By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. 
Set\nthis flag to enable logging them.\n\"\"\"", + "detail": "src.agents._debug", + "documentation": {} + }, + { + "label": "QueueCompleteSentinel", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class QueueCompleteSentinel:\n pass\nQUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel()\n_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@dataclass\nclass AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "AgentToolUseTracker", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)\n if existing_data:\n existing_data[1].extend(tool_names)\n else:\n self.agent_to_tools.append((agent, tool_names))\n def has_used_tools(self, agent: Agent[Any]) -> bool:", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "ToolRunHandoff", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class ToolRunHandoff:\n handoff: Handoff\n tool_call: ResponseFunctionToolCall\n@dataclass\nclass ToolRunMCPApprovalRequest:\n request_item: McpApprovalRequest\n mcp_tool: HostedMCPTool\n@dataclass\nclass ToolRunLocalShellCall:\n tool_call: LocalShellCall", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "ToolRunMCPApprovalRequest", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class ToolRunMCPApprovalRequest:\n request_item: McpApprovalRequest\n mcp_tool: HostedMCPTool\n@dataclass\nclass ToolRunLocalShellCall:\n tool_call: LocalShellCall\n local_shell_tool: LocalShellTool\n@dataclass\nclass ProcessedResponse:\n new_items: list[RunItem]", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "ToolRunLocalShellCall", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class ToolRunLocalShellCall:\n tool_call: LocalShellCall\n local_shell_tool: LocalShellTool\n@dataclass\nclass ProcessedResponse:\n new_items: list[RunItem]\n handoffs: list[ToolRunHandoff]\n functions: list[ToolRunFunction]\n computer_actions: list[ToolRunComputerAction]\n local_shell_calls: list[ToolRunLocalShellCall]", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "ProcessedResponse", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class ProcessedResponse:\n new_items: list[RunItem]\n handoffs: list[ToolRunHandoff]\n functions: list[ToolRunFunction]\n computer_actions: list[ToolRunComputerAction]\n local_shell_calls: list[ToolRunLocalShellCall]\n tools_used: list[str] # Names of all tools used, including 
hosted tools\n mcp_approval_requests: list[ToolRunMCPApprovalRequest] # Only requests with callbacks\n def has_tools_or_approvals_to_run(self) -> bool:\n # Handoffs, functions and computer actions need local processing", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "NextStepHandoff", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class NextStepHandoff:\n new_agent: Agent[Any]\n@dataclass\nclass NextStepFinalOutput:\n output: Any\n@dataclass\nclass NextStepRunAgain:\n pass\n@dataclass\nclass SingleStepResult:", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "NextStepFinalOutput", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class NextStepFinalOutput:\n output: Any\n@dataclass\nclass NextStepRunAgain:\n pass\n@dataclass\nclass SingleStepResult:\n original_input: str | list[TResponseInputItem]\n \"\"\"The input items i.e. the items before run() was called. May be mutated by handoff input\n filters.\"\"\"", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "NextStepRunAgain", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class NextStepRunAgain:\n pass\n@dataclass\nclass SingleStepResult:\n original_input: str | list[TResponseInputItem]\n \"\"\"The input items i.e. the items before run() was called. May be mutated by handoff input\n filters.\"\"\"\n model_response: ModelResponse\n \"\"\"The model response for the current step.\"\"\"\n pre_step_items: list[RunItem]", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "SingleStepResult", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class SingleStepResult:\n original_input: str | list[TResponseInputItem]\n \"\"\"The input items i.e. the items before run() was called. 
May be mutated by handoff input\n filters.\"\"\"\n model_response: ModelResponse\n \"\"\"The model response for the current step.\"\"\"\n pre_step_items: list[RunItem]\n \"\"\"Items generated before the current step.\"\"\"\n new_step_items: list[RunItem]\n \"\"\"Items generated during this current step.\"\"\"", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "RunImpl", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class RunImpl:\n @classmethod\n async def execute_tools_and_side_effects(\n cls,\n *,\n agent: Agent[TContext],\n # The original input to the Runner\n original_input: str | list[TResponseInputItem],\n # Everything generated by Runner since the original input, but before the current step\n pre_step_items: list[RunItem],", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "TraceCtxManager", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class TraceCtxManager:\n \"\"\"Creates a trace only if there is no current trace, and manages the trace lifecycle.\"\"\"\n def __init__(\n self,\n workflow_name: str,\n trace_id: str | None,\n group_id: str | None,\n metadata: dict[str, Any] | None,\n disabled: bool,\n ):", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "ComputerAction", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class ComputerAction:\n @classmethod\n async def execute(\n cls,\n *,\n agent: Agent[TContext],\n action: ToolRunComputerAction,\n hooks: RunHooks[TContext],\n context_wrapper: RunContextWrapper[TContext],\n config: RunConfig,", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "LocalShellAction", + "kind": 6, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "class LocalShellAction:\n @classmethod\n async def execute(\n cls,\n *,\n agent: Agent[TContext],\n call: ToolRunLocalShellCall,\n hooks: RunHooks[TContext],\n context_wrapper: RunContextWrapper[TContext],\n config: RunConfig,", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "get_model_tracing_impl", + "kind": 2, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "def get_model_tracing_impl(\n tracing_disabled: bool, trace_include_sensitive_data: bool\n) -> ModelTracing:\n if tracing_disabled:\n return ModelTracing.DISABLED\n elif trace_include_sensitive_data:\n return ModelTracing.ENABLED\n else:\n return ModelTracing.ENABLED_WITHOUT_DATA\nclass RunImpl:", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "QUEUE_COMPLETE_SENTINEL", + "kind": 5, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "QUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel()\n_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@dataclass\nclass AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). 
Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)\n if existing_data:\n existing_data[1].extend(tool_names)", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "_NOT_FINAL_OUTPUT", + "kind": 5, + "importPath": "src.agents._run_impl", + "description": "src.agents._run_impl", + "peekOfCode": "_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@dataclass\nclass AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)\n if existing_data:\n existing_data[1].extend(tool_names)\n else:", + "detail": "src.agents._run_impl", + "documentation": {} + }, + { + "label": "ToolsToFinalOutputResult", + "kind": 6, + "importPath": "src.agents.agent", + "description": "src.agents.agent", + "peekOfCode": "class ToolsToFinalOutputResult:\n is_final_output: bool\n \"\"\"Whether this is the final output. If False, the LLM will run again and receive the tool call\n output.\n \"\"\"\n final_output: Any | None = None\n \"\"\"The final output. Can be None if `is_final_output` is False, otherwise must match the\n `output_type` of the agent.\n \"\"\"\nToolsToFinalOutputFunction: TypeAlias = Callable[", + "detail": "src.agents.agent", + "documentation": {} + }, + { + "label": "StopAtTools", + "kind": 6, + "importPath": "src.agents.agent", + "description": "src.agents.agent", + "peekOfCode": "class StopAtTools(TypedDict):\n stop_at_tool_names: list[str]\n \"\"\"A list of tool names, any of which will stop the agent from running further.\"\"\"\nclass MCPConfig(TypedDict):\n \"\"\"Configuration for MCP servers.\"\"\"\n convert_schemas_to_strict: NotRequired[bool]\n \"\"\"If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a\n best-effort conversion, so some schemas may not be convertible. Defaults to False.\n \"\"\"\n@dataclass", + "detail": "src.agents.agent", + "documentation": {} + }, + { + "label": "MCPConfig", + "kind": 6, + "importPath": "src.agents.agent", + "description": "src.agents.agent", + "peekOfCode": "class MCPConfig(TypedDict):\n \"\"\"Configuration for MCP servers.\"\"\"\n convert_schemas_to_strict: NotRequired[bool]\n \"\"\"If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a\n best-effort conversion, so some schemas may not be convertible. Defaults to False.\n \"\"\"\n@dataclass\nclass AgentBase(Generic[TContext]):\n \"\"\"Base class for `Agent` and `RealtimeAgent`.\"\"\"\n name: str", + "detail": "src.agents.agent", + "documentation": {} + }, + { + "label": "AgentBase", + "kind": 6, + "importPath": "src.agents.agent", + "description": "src.agents.agent", + "peekOfCode": "class AgentBase(Generic[TContext]):\n \"\"\"Base class for `Agent` and `RealtimeAgent`.\"\"\"\n name: str\n \"\"\"The name of the agent.\"\"\"\n handoff_description: str | None = None\n \"\"\"A description of the agent. 
This is used when the agent is used as a handoff, so that an\n LLM knows what it does and when to invoke it.\n \"\"\"\n tools: list[Tool] = field(default_factory=list)\n \"\"\"A list of tools that the agent can use.\"\"\"", + "detail": "src.agents.agent", + "documentation": {} + }, + { + "label": "Agent", + "kind": 6, + "importPath": "src.agents.agent", + "description": "src.agents.agent", + "peekOfCode": "class Agent(AgentBase, Generic[TContext]):\n \"\"\"An agent is an AI model configured with instructions, tools, guardrails, handoffs and more.\n We strongly recommend passing `instructions`, which is the \"system prompt\" for the agent. In\n addition, you can pass `handoff_description`, which is a human-readable description of the\n agent, used when the agent is used inside tools/handoffs.\n Agents are generic on the context type. The context is a (mutable) object you create. It is\n passed to tool functions, handoffs, guardrails, etc.\n See `AgentBase` for base parameters that are shared with `RealtimeAgent`s.\n \"\"\"\n instructions: (", + "detail": "src.agents.agent", + "documentation": {} + }, + { + "label": "AgentOutputSchemaBase", + "kind": 6, + "importPath": "src.agents.agent_output", + "description": "src.agents.agent_output", + "peekOfCode": "class AgentOutputSchemaBase(abc.ABC):\n \"\"\"An object that captures the JSON schema of the output, as well as validating/parsing JSON\n produced by the LLM into the output type.\n \"\"\"\n @abc.abstractmethod\n def is_plain_text(self) -> bool:\n \"\"\"Whether the output type is plain text (versus a JSON object).\"\"\"\n pass\n @abc.abstractmethod\n def name(self) -> str:", + "detail": "src.agents.agent_output", + "documentation": {} + }, + { + "label": "AgentOutputSchema", + "kind": 6, + "importPath": "src.agents.agent_output", + "description": "src.agents.agent_output", + "peekOfCode": "class AgentOutputSchema(AgentOutputSchemaBase):\n \"\"\"An object that captures the JSON schema of the output, as well as validating/parsing JSON\n produced by the LLM into the output type.\n \"\"\"\n output_type: type[Any]\n \"\"\"The type of the output.\"\"\"\n _type_adapter: TypeAdapter[Any]\n \"\"\"A type adapter that wraps the output type, so that we can validate JSON.\"\"\"\n _is_wrapped: bool\n \"\"\"Whether the output type is wrapped in a dictionary. This is generally done if the base", + "detail": "src.agents.agent_output", + "documentation": {} + }, + { + "label": "_WRAPPER_DICT_KEY", + "kind": 5, + "importPath": "src.agents.agent_output", + "description": "src.agents.agent_output", + "peekOfCode": "_WRAPPER_DICT_KEY = \"response\"\nclass AgentOutputSchemaBase(abc.ABC):\n \"\"\"An object that captures the JSON schema of the output, as well as validating/parsing JSON\n produced by the LLM into the output type.\n \"\"\"\n @abc.abstractmethod\n def is_plain_text(self) -> bool:\n \"\"\"Whether the output type is plain text (versus a JSON object).\"\"\"\n pass\n @abc.abstractmethod", + "detail": "src.agents.agent_output", + "documentation": {} + }, + { + "label": "Computer", + "kind": 6, + "importPath": "src.agents.computer", + "description": "src.agents.computer", + "peekOfCode": "class Computer(abc.ABC):\n \"\"\"A computer implemented with sync operations. 
The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property\n @abc.abstractmethod\n def dimensions(self) -> tuple[int, int]:", + "detail": "src.agents.computer", + "documentation": {} + }, + { + "label": "AsyncComputer", + "kind": 6, + "importPath": "src.agents.computer", + "description": "src.agents.computer", + "peekOfCode": "class AsyncComputer(abc.ABC):\n \"\"\"A computer implemented with async operations. The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property\n @abc.abstractmethod\n def dimensions(self) -> tuple[int, int]:", + "detail": "src.agents.computer", + "documentation": {} + }, + { + "label": "Environment", + "kind": 5, + "importPath": "src.agents.computer", + "description": "src.agents.computer", + "peekOfCode": "Environment = Literal[\"mac\", \"windows\", \"ubuntu\", \"browser\"]\nButton = Literal[\"left\", \"right\", \"wheel\", \"back\", \"forward\"]\nclass Computer(abc.ABC):\n \"\"\"A computer implemented with sync operations. The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property", + "detail": "src.agents.computer", + "documentation": {} + }, + { + "label": "Button", + "kind": 5, + "importPath": "src.agents.computer", + "description": "src.agents.computer", + "peekOfCode": "Button = Literal[\"left\", \"right\", \"wheel\", \"back\", \"forward\"]\nclass Computer(abc.ABC):\n \"\"\"A computer implemented with sync operations. The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property\n @abc.abstractmethod", + "detail": "src.agents.computer", + "documentation": {} + }, + { + "label": "RunErrorDetails", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class RunErrorDetails:\n \"\"\"Data collected from an agent run when an exception occurs.\"\"\"\n input: str | list[TResponseInputItem]\n new_items: list[RunItem]\n raw_responses: list[ModelResponse]\n last_agent: Agent[Any]\n context_wrapper: RunContextWrapper[Any]\n input_guardrail_results: list[InputGuardrailResult]\n output_guardrail_results: list[OutputGuardrailResult]\n def __str__(self) -> str:", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "AgentsException", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class AgentsException(Exception):\n \"\"\"Base class for all exceptions in the Agents SDK.\"\"\"\n run_data: RunErrorDetails | None\n def __init__(self, *args: object) -> None:\n super().__init__(*args)\n self.run_data = None\nclass MaxTurnsExceeded(AgentsException):\n \"\"\"Exception raised when the maximum number of turns is exceeded.\"\"\"\n message: str\n def __init__(self, message: str):", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "MaxTurnsExceeded", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class MaxTurnsExceeded(AgentsException):\n \"\"\"Exception raised when the maximum number of turns is exceeded.\"\"\"\n message: str\n def 
__init__(self, message: str):\n self.message = message\n super().__init__(message)\nclass ModelBehaviorError(AgentsException):\n \"\"\"Exception raised when the model does something unexpected, e.g. calling a tool that doesn't\n exist, or providing malformed JSON.\n \"\"\"", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "ModelBehaviorError", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class ModelBehaviorError(AgentsException):\n \"\"\"Exception raised when the model does something unexpected, e.g. calling a tool that doesn't\n exist, or providing malformed JSON.\n \"\"\"\n message: str\n def __init__(self, message: str):\n self.message = message\n super().__init__(message)\nclass UserError(AgentsException):\n \"\"\"Exception raised when the user makes an error using the SDK.\"\"\"", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "UserError", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class UserError(AgentsException):\n \"\"\"Exception raised when the user makes an error using the SDK.\"\"\"\n message: str\n def __init__(self, message: str):\n self.message = message\n super().__init__(message)\nclass InputGuardrailTripwireTriggered(AgentsException):\n \"\"\"Exception raised when a guardrail tripwire is triggered.\"\"\"\n guardrail_result: InputGuardrailResult\n \"\"\"The result data of the guardrail that was triggered.\"\"\"", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "InputGuardrailTripwireTriggered", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class InputGuardrailTripwireTriggered(AgentsException):\n \"\"\"Exception raised when a guardrail tripwire is triggered.\"\"\"\n guardrail_result: InputGuardrailResult\n \"\"\"The result data of the guardrail that was triggered.\"\"\"\n def __init__(self, guardrail_result: InputGuardrailResult):\n self.guardrail_result = guardrail_result\n super().__init__(\n f\"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire\"\n )\nclass OutputGuardrailTripwireTriggered(AgentsException):", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "OutputGuardrailTripwireTriggered", + "kind": 6, + "importPath": "src.agents.exceptions", + "description": "src.agents.exceptions", + "peekOfCode": "class OutputGuardrailTripwireTriggered(AgentsException):\n \"\"\"Exception raised when a guardrail tripwire is triggered.\"\"\"\n guardrail_result: OutputGuardrailResult\n \"\"\"The result data of the guardrail that was triggered.\"\"\"\n def __init__(self, guardrail_result: OutputGuardrailResult):\n self.guardrail_result = guardrail_result\n super().__init__(\n f\"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire\"\n )", + "detail": "src.agents.exceptions", + "documentation": {} + }, + { + "label": "FuncSchema", + "kind": 6, + "importPath": "src.agents.function_schema", + "description": "src.agents.function_schema", + "peekOfCode": "class FuncSchema:\n \"\"\"\n Captures the schema for a python function, in preparation for sending it to an LLM as a tool.\n \"\"\"\n name: str\n \"\"\"The name of the function.\"\"\"\n description: str | None\n \"\"\"The description of the function.\"\"\"\n params_pydantic_model: type[BaseModel]\n \"\"\"A Pydantic model that represents the function's 
parameters.\"\"\"", + "detail": "src.agents.function_schema", + "documentation": {} + }, + { + "label": "FuncDocumentation", + "kind": 6, + "importPath": "src.agents.function_schema", + "description": "src.agents.function_schema", + "peekOfCode": "class FuncDocumentation:\n \"\"\"Contains metadata about a python function, extracted from its docstring.\"\"\"\n name: str\n \"\"\"The name of the function, via `__name__`.\"\"\"\n description: str | None\n \"\"\"The description of the function, derived from the docstring.\"\"\"\n param_descriptions: dict[str, str] | None\n \"\"\"The parameter descriptions of the function, derived from the docstring.\"\"\"\nDocstringStyle = Literal[\"google\", \"numpy\", \"sphinx\"]\n# As of Feb 2025, the automatic style detection in griffe is an Insiders feature. This", + "detail": "src.agents.function_schema", + "documentation": {} + }, + { + "label": "generate_func_documentation", + "kind": 2, + "importPath": "src.agents.function_schema", + "description": "src.agents.function_schema", + "peekOfCode": "def generate_func_documentation(\n func: Callable[..., Any], style: DocstringStyle | None = None\n) -> FuncDocumentation:\n \"\"\"\n Extracts metadata from a function docstring, in preparation for sending it to an LLM as a tool.\n Args:\n func: The function to extract documentation from.\n style: The style of the docstring to use for parsing. If not provided, we will attempt to\n auto-detect the style.\n Returns:", + "detail": "src.agents.function_schema", + "documentation": {} + }, + { + "label": "function_schema", + "kind": 2, + "importPath": "src.agents.function_schema", + "description": "src.agents.function_schema", + "peekOfCode": "def function_schema(\n func: Callable[..., Any],\n docstring_style: DocstringStyle | None = None,\n name_override: str | None = None,\n description_override: str | None = None,\n use_docstring_info: bool = True,\n strict_json_schema: bool = True,\n) -> FuncSchema:\n \"\"\"\n Given a python function, extracts a `FuncSchema` from it, capturing the name, description,", + "detail": "src.agents.function_schema", + "documentation": {} + }, + { + "label": "DocstringStyle", + "kind": 5, + "importPath": "src.agents.function_schema", + "description": "src.agents.function_schema", + "peekOfCode": "DocstringStyle = Literal[\"google\", \"numpy\", \"sphinx\"]\n# As of Feb 2025, the automatic style detection in griffe is an Insiders feature. This\n# code approximates it.\ndef _detect_docstring_style(doc: str) -> DocstringStyle:\n scores: dict[DocstringStyle, int] = {\"sphinx\": 0, \"numpy\": 0, \"google\": 0}\n # Sphinx style detection: look for :param, :type, :return:, and :rtype:\n sphinx_patterns = [r\"^:param\\s\", r\"^:type\\s\", r\"^:return:\", r\"^:rtype:\"]\n for pattern in sphinx_patterns:\n if re.search(pattern, doc, re.MULTILINE):\n scores[\"sphinx\"] += 1", + "detail": "src.agents.function_schema", + "documentation": {} + }, + { + "label": "GuardrailFunctionOutput", + "kind": 6, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "class GuardrailFunctionOutput:\n \"\"\"The output of a guardrail function.\"\"\"\n output_info: Any\n \"\"\"\n Optional information about the guardrail's output. For example, the guardrail could include\n information about the checks it performed and granular results.\n \"\"\"\n tripwire_triggered: bool\n \"\"\"\n Whether the tripwire was triggered. 
If triggered, the agent's execution will be halted.", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "InputGuardrailResult", + "kind": 6, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "class InputGuardrailResult:\n \"\"\"The result of a guardrail run.\"\"\"\n guardrail: InputGuardrail[Any]\n \"\"\"\n The guardrail that was run.\n \"\"\"\n output: GuardrailFunctionOutput\n \"\"\"The output of the guardrail function.\"\"\"\n@dataclass\nclass OutputGuardrailResult:", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "OutputGuardrailResult", + "kind": 6, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "class OutputGuardrailResult:\n \"\"\"The result of a guardrail run.\"\"\"\n guardrail: OutputGuardrail[Any]\n \"\"\"\n The guardrail that was run.\n \"\"\"\n agent_output: Any\n \"\"\"\n The output of the agent that was checked by the guardrail.\n \"\"\"", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "InputGuardrail", + "kind": 6, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "class InputGuardrail(Generic[TContext]):\n \"\"\"Input guardrails are checks that run in parallel to the agent's execution.\n They can be used to do things like:\n - Check if input messages are off-topic\n - Take over control of the agent's execution if an unexpected input is detected\n You can use the `@input_guardrail()` decorator to turn a function into an `InputGuardrail`, or\n create an `InputGuardrail` manually.\n Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, the agent\n execution will immediately stop and a `InputGuardrailTripwireTriggered` exception will be raised\n \"\"\"", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "OutputGuardrail", + "kind": 6, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "class OutputGuardrail(Generic[TContext]):\n \"\"\"Output guardrails are checks that run on the final output of an agent.\n They can be used to do check if the output passes certain validation criteria\n You can use the `@output_guardrail()` decorator to turn a function into an `OutputGuardrail`,\n or create an `OutputGuardrail` manually.\n Guardrails return a `GuardrailResult`. 
If `result.tripwire_triggered` is `True`, a\n `OutputGuardrailTripwireTriggered` exception will be raised.\n \"\"\"\n guardrail_function: Callable[\n [RunContextWrapper[TContext], Agent[Any], Any],", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "input_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(\n func: _InputGuardrailFuncAsync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(\n *,", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "input_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def input_guardrail(\n func: _InputGuardrailFuncAsync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]],\n InputGuardrail[TContext_co],", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "input_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def input_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]],\n InputGuardrail[TContext_co],\n]: ...\ndef input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co]\n | _InputGuardrailFuncAsync[TContext_co]", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "input_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co]\n | _InputGuardrailFuncAsync[TContext_co]\n | None = None,\n *,\n name: str | None = None,\n) -> (\n InputGuardrail[TContext_co]\n | Callable[\n [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]],", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "output_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(\n func: _OutputGuardrailFuncAsync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(\n *,", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "output_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def output_guardrail(\n func: _OutputGuardrailFuncAsync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]],\n OutputGuardrail[TContext_co],", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "output_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def output_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_OutputGuardrailFuncSync[TContext_co] | 
_OutputGuardrailFuncAsync[TContext_co]],\n OutputGuardrail[TContext_co],\n]: ...\ndef output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co]\n | _OutputGuardrailFuncAsync[TContext_co]", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "output_guardrail", + "kind": 2, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "def output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co]\n | _OutputGuardrailFuncAsync[TContext_co]\n | None = None,\n *,\n name: str | None = None,\n) -> (\n OutputGuardrail[TContext_co]\n | Callable[\n [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]],", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "TContext_co", + "kind": 5, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "TContext_co = TypeVar(\"TContext_co\", bound=Any, covariant=True)\n# For InputGuardrail\n_InputGuardrailFuncSync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n GuardrailFunctionOutput,\n]\n_InputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n Awaitable[GuardrailFunctionOutput],\n]", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "_InputGuardrailFuncSync", + "kind": 5, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "_InputGuardrailFuncSync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n GuardrailFunctionOutput,\n]\n_InputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef input_guardrail(", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "_InputGuardrailFuncAsync", + "kind": 5, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "_InputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "_OutputGuardrailFuncSync", + "kind": 5, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "_OutputGuardrailFuncSync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Any],\n GuardrailFunctionOutput,\n]\n_OutputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Any],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef output_guardrail(", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": "_OutputGuardrailFuncAsync", + "kind": 5, + "importPath": "src.agents.guardrail", + "description": "src.agents.guardrail", + "peekOfCode": "_OutputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Any],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(", + "detail": "src.agents.guardrail", + "documentation": {} + }, + { + "label": 
"HandoffInputData", + "kind": 6, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "class HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]\n \"\"\"\n The items generated before the agent turn where the handoff was invoked.\n \"\"\"\n new_items: tuple[RunItem, ...]", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "Handoff", + "kind": 6, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "class Handoff(Generic[TContext]):\n \"\"\"A handoff is when an agent delegates a task to another agent.\n For example, in a customer support scenario you might have a \"triage agent\" that determines\n which agent should handle the user's request, and sub-agents that specialize in different\n areas like billing, account management, etc.\n \"\"\"\n tool_name: str\n \"\"\"The name of the tool that represents the handoff.\"\"\"\n tool_description: str\n \"\"\"The description of the tool that represents the handoff.\"\"\"", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "handoff", + "kind": 2, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "def handoff(\n agent: Agent[TContext],\n *,\n tool_name_override: str | None = None,\n tool_description_override: str | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]: ...\n@overload\ndef handoff(", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "handoff", + "kind": 2, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "def handoff(\n agent: Agent[TContext],\n *,\n on_handoff: OnHandoffWithInput[THandoffInput],\n input_type: type[THandoffInput],\n tool_description_override: str | None = None,\n tool_name_override: str | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]: ...", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "handoff", + "kind": 2, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "def handoff(\n agent: Agent[TContext],\n *,\n on_handoff: OnHandoffWithoutInput,\n tool_description_override: str | None = None,\n tool_name_override: str | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]: ...\ndef handoff(", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "handoff", + "kind": 2, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "def handoff(\n agent: Agent[TContext],\n tool_name_override: str | None = None,\n tool_description_override: str | None = None,\n on_handoff: OnHandoffWithInput[THandoffInput] | OnHandoffWithoutInput | None = None,\n input_type: type[THandoffInput] | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], 
MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]:\n \"\"\"Create a handoff from an agent.", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "THandoffInput", + "kind": 5, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "THandoffInput = TypeVar(\"THandoffInput\", default=Any)\nOnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any]\nOnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]\n@dataclass(frozen=True)\nclass HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "OnHandoffWithInput", + "kind": 5, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "OnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any]\nOnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]\n@dataclass(frozen=True)\nclass HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]\n \"\"\"", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "OnHandoffWithoutInput", + "kind": 5, + "importPath": "src.agents.handoffs", + "description": "src.agents.handoffs", + "peekOfCode": "OnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]\n@dataclass(frozen=True)\nclass HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]\n \"\"\"\n The items generated before the agent turn where the handoff was invoked.", + "detail": "src.agents.handoffs", + "documentation": {} + }, + { + "label": "RunItemBase", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T\n \"\"\"The raw Responses item from the run. This will always be a either an output item (i.e.\n `openai.types.responses.ResponseOutputItem` or an input item\n (i.e. 
`openai.types.responses.ResponseInputItemParam`).\n \"\"\"\n def to_input_item(self) -> TResponseInputItem:\n \"\"\"Converts this item into an input item suitable for passing to the model.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "MessageOutputItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class MessageOutputItem(RunItemBase[ResponseOutputMessage]):\n \"\"\"Represents a message from the LLM.\"\"\"\n raw_item: ResponseOutputMessage\n \"\"\"The raw response output message.\"\"\"\n type: Literal[\"message_output_item\"] = \"message_output_item\"\n@dataclass\nclass HandoffCallItem(RunItemBase[ResponseFunctionToolCall]):\n \"\"\"Represents a tool call for a handoff from one agent to another.\"\"\"\n raw_item: ResponseFunctionToolCall\n \"\"\"The raw response function tool call that represents the handoff.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "HandoffCallItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class HandoffCallItem(RunItemBase[ResponseFunctionToolCall]):\n \"\"\"Represents a tool call for a handoff from one agent to another.\"\"\"\n raw_item: ResponseFunctionToolCall\n \"\"\"The raw response function tool call that represents the handoff.\"\"\"\n type: Literal[\"handoff_call_item\"] = \"handoff_call_item\"\n@dataclass\nclass HandoffOutputItem(RunItemBase[TResponseInputItem]):\n \"\"\"Represents the output of a handoff.\"\"\"\n raw_item: TResponseInputItem\n \"\"\"The raw input item that represents the handoff taking place.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "HandoffOutputItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class HandoffOutputItem(RunItemBase[TResponseInputItem]):\n \"\"\"Represents the output of a handoff.\"\"\"\n raw_item: TResponseInputItem\n \"\"\"The raw input item that represents the handoff taking place.\"\"\"\n source_agent: Agent[Any]\n \"\"\"The agent that made the handoff.\"\"\"\n target_agent: Agent[Any]\n \"\"\"The agent that is being handed off to.\"\"\"\n type: Literal[\"handoff_output_item\"] = \"handoff_output_item\"\nToolCallItemTypes: TypeAlias = Union[", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "ToolCallItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class ToolCallItem(RunItemBase[ToolCallItemTypes]):\n \"\"\"Represents a tool call e.g. a function call or computer action call.\"\"\"\n raw_item: ToolCallItemTypes\n \"\"\"The raw tool call item.\"\"\"\n type: Literal[\"tool_call_item\"] = \"tool_call_item\"\n@dataclass\nclass ToolCallOutputItem(\n RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]\n):\n \"\"\"Represents the output of a tool call.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "ToolCallOutputItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class ToolCallOutputItem(\n RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]\n):\n \"\"\"Represents the output of a tool call.\"\"\"\n raw_item: FunctionCallOutput | ComputerCallOutput | LocalShellCallOutput\n \"\"\"The raw item from the model.\"\"\"\n output: Any\n \"\"\"The output of the tool call. 
This is whatever the tool call returned; the `raw_item`\n contains a string representation of the output.\n \"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "ReasoningItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class ReasoningItem(RunItemBase[ResponseReasoningItem]):\n \"\"\"Represents a reasoning item.\"\"\"\n raw_item: ResponseReasoningItem\n \"\"\"The raw reasoning item.\"\"\"\n type: Literal[\"reasoning_item\"] = \"reasoning_item\"\n@dataclass\nclass MCPListToolsItem(RunItemBase[McpListTools]):\n \"\"\"Represents a call to an MCP server to list tools.\"\"\"\n raw_item: McpListTools\n \"\"\"The raw MCP list tools call.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "MCPListToolsItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class MCPListToolsItem(RunItemBase[McpListTools]):\n \"\"\"Represents a call to an MCP server to list tools.\"\"\"\n raw_item: McpListTools\n \"\"\"The raw MCP list tools call.\"\"\"\n type: Literal[\"mcp_list_tools_item\"] = \"mcp_list_tools_item\"\n@dataclass\nclass MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):\n \"\"\"Represents a request for MCP approval.\"\"\"\n raw_item: McpApprovalRequest\n \"\"\"The raw MCP approval request.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "MCPApprovalRequestItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):\n \"\"\"Represents a request for MCP approval.\"\"\"\n raw_item: McpApprovalRequest\n \"\"\"The raw MCP approval request.\"\"\"\n type: Literal[\"mcp_approval_request_item\"] = \"mcp_approval_request_item\"\n@dataclass\nclass MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):\n \"\"\"Represents a response to an MCP approval request.\"\"\"\n raw_item: McpApprovalResponse\n \"\"\"The raw MCP approval response.\"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "MCPApprovalResponseItem", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):\n \"\"\"Represents a response to an MCP approval request.\"\"\"\n raw_item: McpApprovalResponse\n \"\"\"The raw MCP approval response.\"\"\"\n type: Literal[\"mcp_approval_response_item\"] = \"mcp_approval_response_item\"\nRunItem: TypeAlias = Union[\n MessageOutputItem,\n HandoffCallItem,\n HandoffOutputItem,\n ToolCallItem,", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "ModelResponse", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class ModelResponse:\n output: list[TResponseOutputItem]\n \"\"\"A list of outputs (messages, tool calls, etc) generated by the model\"\"\"\n usage: Usage\n \"\"\"The usage information for the response.\"\"\"\n response_id: str | None\n \"\"\"An ID for the response which can be used to refer to the response in subsequent calls to the\n model. 
Not supported by all model providers.\n If using OpenAI models via the Responses API, this is the `response_id` parameter, and it can\n be passed to `Runner.run`.", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "ItemHelpers", + "kind": 6, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "class ItemHelpers:\n @classmethod\n def extract_last_content(cls, message: TResponseOutputItem) -> str:\n \"\"\"Extracts the last text content or refusal from a message.\"\"\"\n if not isinstance(message, ResponseOutputMessage):\n return \"\"\n last_content = message.content[-1]\n if isinstance(last_content, ResponseOutputText):\n return last_content.text\n elif isinstance(last_content, ResponseOutputRefusal):", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "TResponse", + "kind": 5, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "TResponse = Response\n\"\"\"A type alias for the Response type from the OpenAI SDK.\"\"\"\nTResponseInputItem = ResponseInputItemParam\n\"\"\"A type alias for the ResponseInputItemParam type from the OpenAI SDK.\"\"\"\nTResponseOutputItem = ResponseOutputItem\n\"\"\"A type alias for the ResponseOutputItem type from the OpenAI SDK.\"\"\"\nTResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "TResponseInputItem", + "kind": 5, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "TResponseInputItem = ResponseInputItemParam\n\"\"\"A type alias for the ResponseInputItemParam type from the OpenAI SDK.\"\"\"\nTResponseOutputItem = ResponseOutputItem\n\"\"\"A type alias for the ResponseOutputItem type from the OpenAI SDK.\"\"\"\nTResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "TResponseOutputItem", + "kind": 5, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "TResponseOutputItem = ResponseOutputItem\n\"\"\"A type alias for the ResponseOutputItem type from the OpenAI SDK.\"\"\"\nTResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "TResponseStreamEvent", + "kind": 5, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "TResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T\n \"\"\"The raw Responses item from the run. 
This will always be a either an output item (i.e.\n `openai.types.responses.ResponseOutputItem` or an input item", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "T", + "kind": 5, + "importPath": "src.agents.items", + "description": "src.agents.items", + "peekOfCode": "T = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T\n \"\"\"The raw Responses item from the run. This will always be a either an output item (i.e.\n `openai.types.responses.ResponseOutputItem` or an input item\n (i.e. `openai.types.responses.ResponseInputItemParam`).\n \"\"\"", + "detail": "src.agents.items", + "documentation": {} + }, + { + "label": "RunHooksBase", + "kind": 6, + "importPath": "src.agents.lifecycle", + "description": "src.agents.lifecycle", + "peekOfCode": "class RunHooksBase(Generic[TContext, TAgent]):\n \"\"\"A class that receives callbacks on various lifecycle events in an agent run. Subclass and\n override the methods you need.\n \"\"\"\n async def on_agent_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:\n \"\"\"Called before the agent is invoked. Called each time the current agent changes.\"\"\"\n pass\n async def on_agent_end(\n self,\n context: RunContextWrapper[TContext],", + "detail": "src.agents.lifecycle", + "documentation": {} + }, + { + "label": "AgentHooksBase", + "kind": 6, + "importPath": "src.agents.lifecycle", + "description": "src.agents.lifecycle", + "peekOfCode": "class AgentHooksBase(Generic[TContext, TAgent]):\n \"\"\"A class that receives callbacks on various lifecycle events for a specific agent. You can\n set this on `agent.hooks` to receive events for that specific agent.\n Subclass and override the methods you need.\n \"\"\"\n async def on_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:\n \"\"\"Called before the agent is invoked. Called each time the running agent is changed to this\n agent.\"\"\"\n pass\n async def on_end(", + "detail": "src.agents.lifecycle", + "documentation": {} + }, + { + "label": "TAgent", + "kind": 5, + "importPath": "src.agents.lifecycle", + "description": "src.agents.lifecycle", + "peekOfCode": "TAgent = TypeVar(\"TAgent\", bound=AgentBase, default=AgentBase)\nclass RunHooksBase(Generic[TContext, TAgent]):\n \"\"\"A class that receives callbacks on various lifecycle events in an agent run. Subclass and\n override the methods you need.\n \"\"\"\n async def on_agent_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:\n \"\"\"Called before the agent is invoked. 
Called each time the current agent changes.\"\"\"\n pass\n async def on_agent_end(\n self,", + "detail": "src.agents.lifecycle", + "documentation": {} + }, + { + "label": "RunHooks", + "kind": 5, + "importPath": "src.agents.lifecycle", + "description": "src.agents.lifecycle", + "peekOfCode": "RunHooks = RunHooksBase[TContext, Agent]\n\"\"\"Run hooks when using `Agent`.\"\"\"\nAgentHooks = AgentHooksBase[TContext, Agent]\n\"\"\"Agent hooks for `Agent`s.\"\"\"", + "detail": "src.agents.lifecycle", + "documentation": {} + }, + { + "label": "AgentHooks", + "kind": 5, + "importPath": "src.agents.lifecycle", + "description": "src.agents.lifecycle", + "peekOfCode": "AgentHooks = AgentHooksBase[TContext, Agent]\n\"\"\"Agent hooks for `Agent`s.\"\"\"", + "detail": "src.agents.lifecycle", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "src.agents.logger", + "description": "src.agents.logger", + "peekOfCode": "logger = logging.getLogger(\"openai.agents\")", + "detail": "src.agents.logger", + "documentation": {} + }, + { + "label": "_OmitTypeAnnotation", + "kind": 6, + "importPath": "src.agents.model_settings", + "description": "src.agents.model_settings", + "peekOfCode": "class _OmitTypeAnnotation:\n @classmethod\n def __get_pydantic_core_schema__(\n cls,\n _source_type: Any,\n _handler: GetCoreSchemaHandler,\n ) -> core_schema.CoreSchema:\n def validate_from_none(value: None) -> _Omit:\n return _Omit()\n from_none_schema = core_schema.chain_schema(", + "detail": "src.agents.model_settings", + "documentation": {} + }, + { + "label": "MCPToolChoice", + "kind": 6, + "importPath": "src.agents.model_settings", + "description": "src.agents.model_settings", + "peekOfCode": "class MCPToolChoice:\n server_label: str\n name: str\nOmit = Annotated[_Omit, _OmitTypeAnnotation]\nHeaders: TypeAlias = Mapping[str, Union[str, Omit]]\nToolChoice: TypeAlias = Union[Literal[\"auto\", \"required\", \"none\"], str, MCPToolChoice, None]\n@dataclass\nclass ModelSettings:\n \"\"\"Settings to use when calling an LLM.\n This class holds optional model configuration parameters (e.g. temperature,", + "detail": "src.agents.model_settings", + "documentation": {} + }, + { + "label": "ModelSettings", + "kind": 6, + "importPath": "src.agents.model_settings", + "description": "src.agents.model_settings", + "peekOfCode": "class ModelSettings:\n \"\"\"Settings to use when calling an LLM.\n This class holds optional model configuration parameters (e.g. temperature,\n top_p, penalties, truncation, etc.).\n Not all models/providers support all of these parameters, so please check the API documentation\n for the specific model and provider you are using.\n \"\"\"\n temperature: float | None = None\n \"\"\"The temperature to use when calling the model.\"\"\"\n top_p: float | None = None", + "detail": "src.agents.model_settings", + "documentation": {} + }, + { + "label": "Omit", + "kind": 5, + "importPath": "src.agents.model_settings", + "description": "src.agents.model_settings", + "peekOfCode": "Omit = Annotated[_Omit, _OmitTypeAnnotation]\nHeaders: TypeAlias = Mapping[str, Union[str, Omit]]\nToolChoice: TypeAlias = Union[Literal[\"auto\", \"required\", \"none\"], str, MCPToolChoice, None]\n@dataclass\nclass ModelSettings:\n \"\"\"Settings to use when calling an LLM.\n This class holds optional model configuration parameters (e.g. 
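
The RunHooksBase entry above (and its RunHooks alias) is the callback surface for a whole run. A minimal sketch of a subclass wired into Runner.run via the hooks argument shown in the Runner entry further down; the printed messages are illustrative:

import asyncio
from typing import Any

from agents import Agent, RunContextWrapper, RunHooks, Runner

class LoggingHooks(RunHooks):
    # Called each time the current agent changes.
    async def on_agent_start(self, context: RunContextWrapper, agent: Agent) -> None:
        print(f"agent started: {agent.name}")

    async def on_agent_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:
        print(f"agent {agent.name} produced: {output}")

async def main() -> None:
    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
    result = await Runner.run(agent, "Say hello.", hooks=LoggingHooks())
    print(result)

asyncio.run(main())
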
temperature,\n top_p, penalties, truncation, etc.).\n Not all models/providers support all of these parameters, so please check the API documentation\n for the specific model and provider you are using.", + "detail": "src.agents.model_settings", + "documentation": {} + }, + { + "label": "Prompt", + "kind": 6, + "importPath": "src.agents.prompts", + "description": "src.agents.prompts", + "peekOfCode": "class Prompt(TypedDict):\n \"\"\"Prompt configuration to use for interacting with an OpenAI model.\"\"\"\n id: str\n \"\"\"The unique ID of the prompt.\"\"\"\n version: NotRequired[str]\n \"\"\"Optional version of the prompt.\"\"\"\n variables: NotRequired[dict[str, ResponsesPromptVariables]]\n \"\"\"Optional variables to substitute into the prompt.\"\"\"\n@dataclass\nclass GenerateDynamicPromptData:", + "detail": "src.agents.prompts", + "documentation": {} + }, + { + "label": "GenerateDynamicPromptData", + "kind": 6, + "importPath": "src.agents.prompts", + "description": "src.agents.prompts", + "peekOfCode": "class GenerateDynamicPromptData:\n \"\"\"Inputs to a function that allows you to dynamically generate a prompt.\"\"\"\n context: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n agent: Agent[Any]\n \"\"\"The agent for which the prompt is being generated.\"\"\"\nDynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]\n\"\"\"A function that dynamically generates a prompt.\"\"\"\nclass PromptUtil:\n @staticmethod", + "detail": "src.agents.prompts", + "documentation": {} + }, + { + "label": "PromptUtil", + "kind": 6, + "importPath": "src.agents.prompts", + "description": "src.agents.prompts", + "peekOfCode": "class PromptUtil:\n @staticmethod\n async def to_model_input(\n prompt: Prompt | DynamicPromptFunction | None,\n context: RunContextWrapper[Any],\n agent: Agent[Any],\n ) -> ResponsePromptParam | None:\n if prompt is None:\n return None\n resolved_prompt: Prompt", + "detail": "src.agents.prompts", + "documentation": {} + }, + { + "label": "DynamicPromptFunction", + "kind": 5, + "importPath": "src.agents.prompts", + "description": "src.agents.prompts", + "peekOfCode": "DynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]\n\"\"\"A function that dynamically generates a prompt.\"\"\"\nclass PromptUtil:\n @staticmethod\n async def to_model_input(\n prompt: Prompt | DynamicPromptFunction | None,\n context: RunContextWrapper[Any],\n agent: Agent[Any],\n ) -> ResponsePromptParam | None:\n if prompt is None:", + "detail": "src.agents.prompts", + "documentation": {} + }, + { + "label": "RunResultBase", + "kind": 6, + "importPath": "src.agents.result", + "description": "src.agents.result", + "peekOfCode": "class RunResultBase(abc.ABC):\n input: str | list[TResponseInputItem]\n \"\"\"The original input items i.e. the items before run() was called. This may be a mutated\n version of the input, if there are handoff input filters that mutate the input.\n \"\"\"\n new_items: list[RunItem]\n \"\"\"The new items generated during the agent run. 
These include things like new messages, tool\n calls and their outputs, etc.\n \"\"\"\n raw_responses: list[ModelResponse]", + "detail": "src.agents.result", + "documentation": {} + }, + { + "label": "RunResult", + "kind": 6, + "importPath": "src.agents.result", + "description": "src.agents.result", + "peekOfCode": "class RunResult(RunResultBase):\n _last_agent: Agent[Any]\n @property\n def last_agent(self) -> Agent[Any]:\n \"\"\"The last agent that was run.\"\"\"\n return self._last_agent\n def __str__(self) -> str:\n return pretty_print_result(self)\n@dataclass\nclass RunResultStreaming(RunResultBase):", + "detail": "src.agents.result", + "documentation": {} + }, + { + "label": "RunResultStreaming", + "kind": 6, + "importPath": "src.agents.result", + "description": "src.agents.result", + "peekOfCode": "class RunResultStreaming(RunResultBase):\n \"\"\"The result of an agent run in streaming mode. You can use the `stream_events` method to\n receive semantic events as they are generated.\n The streaming method will raise:\n - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit.\n - A GuardrailTripwireTriggered exception if a guardrail is tripped.\n \"\"\"\n current_agent: Agent[Any]\n \"\"\"The current agent that is running.\"\"\"\n current_turn: int", + "detail": "src.agents.result", + "documentation": {} + }, + { + "label": "T", + "kind": 5, + "importPath": "src.agents.result", + "description": "src.agents.result", + "peekOfCode": "T = TypeVar(\"T\")\n@dataclass\nclass RunResultBase(abc.ABC):\n input: str | list[TResponseInputItem]\n \"\"\"The original input items i.e. the items before run() was called. This may be a mutated\n version of the input, if there are handoff input filters that mutate the input.\n \"\"\"\n new_items: list[RunItem]\n \"\"\"The new items generated during the agent run. These include things like new messages, tool\n calls and their outputs, etc.", + "detail": "src.agents.result", + "documentation": {} + }, + { + "label": "RunConfig", + "kind": 6, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "class RunConfig:\n \"\"\"Configures settings for the entire agent run.\"\"\"\n model: str | Model | None = None\n \"\"\"The model to use for the entire agent run. If set, will override the model set on every\n agent. The model_provider passed in below must be able to resolve this model name.\n \"\"\"\n model_provider: ModelProvider = field(default_factory=MultiProvider)\n \"\"\"The model provider to use when looking up string model names. Defaults to OpenAI.\"\"\"\n model_settings: ModelSettings | None = None\n \"\"\"Configure global model settings. 
Any non-null values will override the agent-specific model", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "RunOptions", + "kind": 6, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "class RunOptions(TypedDict, Generic[TContext]):\n \"\"\"Arguments for ``AgentRunner`` methods.\"\"\"\n context: NotRequired[TContext | None]\n \"\"\"The context for the run.\"\"\"\n max_turns: NotRequired[int]\n \"\"\"The maximum number of turns to run for.\"\"\"\n hooks: NotRequired[RunHooks[TContext] | None]\n \"\"\"Lifecycle hooks for the run.\"\"\"\n run_config: NotRequired[RunConfig | None]\n \"\"\"Run configuration.\"\"\"", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "Runner", + "kind": 6, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "class Runner:\n @classmethod\n async def run(\n cls,\n starting_agent: Agent[TContext],\n input: str | list[TResponseInputItem],\n *,\n context: TContext | None = None,\n max_turns: int = DEFAULT_MAX_TURNS,\n hooks: RunHooks[TContext] | None = None,", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "AgentRunner", + "kind": 6, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "class AgentRunner:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly or subclassed.\n \"\"\"\n async def run(\n self,\n starting_agent: Agent[TContext],\n input: str | list[TResponseInputItem],\n **kwargs: Unpack[RunOptions[TContext]],", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "set_default_agent_runner", + "kind": 2, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "def set_default_agent_runner(runner: AgentRunner | None) -> None:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly.\n \"\"\"\n global DEFAULT_AGENT_RUNNER\n DEFAULT_AGENT_RUNNER = runner or AgentRunner()\ndef get_default_agent_runner() -> AgentRunner:\n \"\"\"\n WARNING: this class is experimental and not part of the public API", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "get_default_agent_runner", + "kind": 2, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "def get_default_agent_runner() -> AgentRunner:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly.\n \"\"\"\n global DEFAULT_AGENT_RUNNER\n return DEFAULT_AGENT_RUNNER\n@dataclass\nclass RunConfig:\n \"\"\"Configures settings for the entire agent run.\"\"\"", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "DEFAULT_MAX_TURNS", + "kind": 5, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "DEFAULT_MAX_TURNS = 10\nDEFAULT_AGENT_RUNNER: AgentRunner = None # type: ignore\n# the value is set at the end of the module\ndef set_default_agent_runner(runner: AgentRunner | None) -> None:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly.\n \"\"\"\n global DEFAULT_AGENT_RUNNER\n DEFAULT_AGENT_RUNNER = runner or AgentRunner()", + "detail": "src.agents.run", + "documentation": {} + }, + { + "label": "DEFAULT_AGENT_RUNNER", + "kind": 5, + "importPath": "src.agents.run", + "description": "src.agents.run", + "peekOfCode": "DEFAULT_AGENT_RUNNER = AgentRunner()", + "detail": 
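
Tying the RunConfig and Runner entries above to the ModelSettings entry earlier: a sketch of overriding the model and its settings for an entire run. The model name and setting values are illustrative (the temperature/top_p/max_tokens trio mirrors the serialization test indexed further down), and the run_config keyword and final_output attribute are assumed from the wider SDK rather than from the snippets shown here:

import asyncio

from agents import Agent, ModelSettings, RunConfig, Runner

async def main() -> None:
    agent = Agent(name="Assistant", instructions="Answer in one sentence.")
    run_config = RunConfig(
        model="gpt-4o",  # overrides the model set on every agent, per the RunConfig docstring
        model_settings=ModelSettings(temperature=0.5, top_p=0.9, max_tokens=100),
    )
    result = await Runner.run(agent, "What does a lifecycle hook do?", run_config=run_config)
    print(result.final_output)

asyncio.run(main())
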
"src.agents.run", + "documentation": {} + }, + { + "label": "RunContextWrapper", + "kind": 6, + "importPath": "src.agents.run_context", + "description": "src.agents.run_context", + "peekOfCode": "class RunContextWrapper(Generic[TContext]):\n \"\"\"This wraps the context object that you passed to `Runner.run()`. It also contains\n information about the usage of the agent run so far.\n NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code\n you implement, like tool functions, callbacks, hooks, etc.\n \"\"\"\n context: TContext\n \"\"\"The context object (or None), passed by you to `Runner.run()`\"\"\"\n usage: Usage = field(default_factory=Usage)\n \"\"\"The usage of the agent run so far. For streamed responses, the usage will be stale until the", + "detail": "src.agents.run_context", + "documentation": {} + }, + { + "label": "TContext", + "kind": 5, + "importPath": "src.agents.run_context", + "description": "src.agents.run_context", + "peekOfCode": "TContext = TypeVar(\"TContext\", default=Any)\n@dataclass\nclass RunContextWrapper(Generic[TContext]):\n \"\"\"This wraps the context object that you passed to `Runner.run()`. It also contains\n information about the usage of the agent run so far.\n NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code\n you implement, like tool functions, callbacks, hooks, etc.\n \"\"\"\n context: TContext\n \"\"\"The context object (or None), passed by you to `Runner.run()`\"\"\"", + "detail": "src.agents.run_context", + "documentation": {} + }, + { + "label": "RawResponsesStreamEvent", + "kind": 6, + "importPath": "src.agents.stream_events", + "description": "src.agents.stream_events", + "peekOfCode": "class RawResponsesStreamEvent:\n \"\"\"Streaming event from the LLM. These are 'raw' events, i.e. they are directly passed through\n from the LLM.\n \"\"\"\n data: TResponseStreamEvent\n \"\"\"The raw responses streaming event from the LLM.\"\"\"\n type: Literal[\"raw_response_event\"] = \"raw_response_event\"\n \"\"\"The type of the event.\"\"\"\n@dataclass\nclass RunItemStreamEvent:", + "detail": "src.agents.stream_events", + "documentation": {} + }, + { + "label": "RunItemStreamEvent", + "kind": 6, + "importPath": "src.agents.stream_events", + "description": "src.agents.stream_events", + "peekOfCode": "class RunItemStreamEvent:\n \"\"\"Streaming events that wrap a `RunItem`. 
As the agent processes the LLM response, it will\n generate these events for new messages, tool calls, tool outputs, handoffs, etc.\n \"\"\"\n name: Literal[\n \"message_output_created\",\n \"handoff_requested\",\n # This is misspelled, but we can't change it because that would be a breaking change\n \"handoff_occured\",\n \"tool_called\",", + "detail": "src.agents.stream_events", + "documentation": {} + }, + { + "label": "AgentUpdatedStreamEvent", + "kind": 6, + "importPath": "src.agents.stream_events", + "description": "src.agents.stream_events", + "peekOfCode": "class AgentUpdatedStreamEvent:\n \"\"\"Event that notifies that there is a new agent running.\"\"\"\n new_agent: Agent[Any]\n \"\"\"The new agent.\"\"\"\n type: Literal[\"agent_updated_stream_event\"] = \"agent_updated_stream_event\"\nStreamEvent: TypeAlias = Union[RawResponsesStreamEvent, RunItemStreamEvent, AgentUpdatedStreamEvent]\n\"\"\"A streaming event from an agent.\"\"\"", + "detail": "src.agents.stream_events", + "documentation": {} + }, + { + "label": "ensure_strict_json_schema", + "kind": 2, + "importPath": "src.agents.strict_schema", + "description": "src.agents.strict_schema", + "peekOfCode": "def ensure_strict_json_schema(\n schema: dict[str, Any],\n) -> dict[str, Any]:\n \"\"\"Mutates the given JSON schema to ensure it conforms to the `strict` standard\n that the OpenAI API expects.\n \"\"\"\n if schema == {}:\n return _EMPTY_SCHEMA\n return _ensure_strict_json_schema(schema, path=(), root=schema)\n# Adapted from https://github.com/openai/openai-python/blob/main/src/openai/lib/_pydantic.py", + "detail": "src.agents.strict_schema", + "documentation": {} + }, + { + "label": "resolve_ref", + "kind": 2, + "importPath": "src.agents.strict_schema", + "description": "src.agents.strict_schema", + "peekOfCode": "def resolve_ref(*, root: dict[str, object], ref: str) -> object:\n if not ref.startswith(\"#/\"):\n raise ValueError(f\"Unexpected $ref format {ref!r}; Does not start with #/\")\n path = ref[2:].split(\"/\")\n resolved = root\n for key in path:\n value = resolved[key]\n assert is_dict(value), (\n f\"encountered non-dictionary entry while resolving {ref} - {resolved}\"\n )", + "detail": "src.agents.strict_schema", + "documentation": {} + }, + { + "label": "is_dict", + "kind": 2, + "importPath": "src.agents.strict_schema", + "description": "src.agents.strict_schema", + "peekOfCode": "def is_dict(obj: object) -> TypeGuard[dict[str, object]]:\n # just pretend that we know there are only `str` keys\n # as that check is not worth the performance cost\n return isinstance(obj, dict)\ndef is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)\ndef has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:\n i = 0\n for _ in obj.keys():\n i += 1", + "detail": "src.agents.strict_schema", + "documentation": {} + }, + { + "label": "is_list", + "kind": 2, + "importPath": "src.agents.strict_schema", + "description": "src.agents.strict_schema", + "peekOfCode": "def is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)\ndef has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:\n i = 0\n for _ in obj.keys():\n i += 1\n if i > n:\n return True\n return False", + "detail": "src.agents.strict_schema", + "documentation": {} + }, + { + "label": "has_more_than_n_keys", + "kind": 2, + "importPath": "src.agents.strict_schema", + "description": "src.agents.strict_schema", + "peekOfCode": "def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:\n i = 0\n for _ in 
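
The three stream event shapes above are what a streamed run yields; the FastAPI helper indexed near the end of this file does the same thing inside an HTTP response. A standalone consumption sketch (the "run_item_stream_event" type literal is assumed by analogy with the other two):

import asyncio

from agents import Agent, Runner

async def main() -> None:
    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
    result = Runner.run_streamed(agent, input="Tell me a joke")
    async for event in result.stream_events():
        if event.type == "raw_response_event":
            continue  # raw LLM deltas, passed straight through
        elif event.type == "agent_updated_stream_event":
            print(f"now running: {event.new_agent.name}")
        elif event.type == "run_item_stream_event":
            print(f"new run item: {event.name}")

asyncio.run(main())
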
obj.keys():\n i += 1\n if i > n:\n return True\n return False", + "detail": "src.agents.strict_schema", + "documentation": {} + }, + { + "label": "_EMPTY_SCHEMA", + "kind": 5, + "importPath": "src.agents.strict_schema", + "description": "src.agents.strict_schema", + "peekOfCode": "_EMPTY_SCHEMA = {\n \"additionalProperties\": False,\n \"type\": \"object\",\n \"properties\": {},\n \"required\": [],\n}\ndef ensure_strict_json_schema(\n schema: dict[str, Any],\n) -> dict[str, Any]:\n \"\"\"Mutates the given JSON schema to ensure it conforms to the `strict` standard", + "detail": "src.agents.strict_schema", + "documentation": {} + }, + { + "label": "FunctionToolResult", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class FunctionToolResult:\n tool: FunctionTool\n \"\"\"The tool that was run.\"\"\"\n output: Any\n \"\"\"The output of the tool.\"\"\"\n run_item: RunItem\n \"\"\"The run item that was produced as a result of the tool call.\"\"\"\n@dataclass\nclass FunctionTool:\n \"\"\"A tool that wraps a function. In most cases, you should use the `function_tool` helpers to", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "FunctionTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class FunctionTool:\n \"\"\"A tool that wraps a function. In most cases, you should use the `function_tool` helpers to\n create a FunctionTool, as they let you easily wrap a Python function.\n \"\"\"\n name: str\n \"\"\"The name of the tool, as shown to the LLM. Generally the name of the function.\"\"\"\n description: str\n \"\"\"A description of the tool, as shown to the LLM.\"\"\"\n params_json_schema: dict[str, Any]\n \"\"\"The JSON schema for the tool's parameters.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "FileSearchTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class FileSearchTool:\n \"\"\"A hosted tool that lets the LLM search through a vector store. Currently only supported with\n OpenAI models, using the Responses API.\n \"\"\"\n vector_store_ids: list[str]\n \"\"\"The IDs of the vector stores to search.\"\"\"\n max_num_results: int | None = None\n \"\"\"The maximum number of results to return.\"\"\"\n include_search_results: bool = False\n \"\"\"Whether to include the search results in the output produced by the LLM.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "WebSearchTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class WebSearchTool:\n \"\"\"A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models,\n using the Responses API.\n \"\"\"\n user_location: UserLocation | None = None\n \"\"\"Optional location for the search. 
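
ensure_strict_json_schema, indexed above, normalizes an arbitrary JSON schema into the strict form the OpenAI API expects (the empty-schema fast path returns the _EMPTY_SCHEMA constant). A tiny sketch; the input schema is illustrative:

from agents.strict_schema import ensure_strict_json_schema

schema = {
    "type": "object",
    "properties": {"city": {"type": "string"}},
}
# Returns the schema rewritten to satisfy the strict tool-calling rules.
print(ensure_strict_json_schema(schema))
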
Lets you customize results to be relevant to a location.\"\"\"\n search_context_size: Literal[\"low\", \"medium\", \"high\"] = \"medium\"\n \"\"\"The amount of context to use for the search.\"\"\"\n @property\n def name(self):", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ComputerTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class ComputerTool:\n \"\"\"A hosted tool that lets the LLM control a computer.\"\"\"\n computer: Computer | AsyncComputer\n \"\"\"The computer implementation, which describes the environment and dimensions of the computer,\n as well as implements the computer actions like click, screenshot, etc.\n \"\"\"\n on_safety_check: Callable[[ComputerToolSafetyCheckData], MaybeAwaitable[bool]] | None = None\n \"\"\"Optional callback to acknowledge computer tool safety checks.\"\"\"\n @property\n def name(self):", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ComputerToolSafetyCheckData", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class ComputerToolSafetyCheckData:\n \"\"\"Information about a computer tool safety check.\"\"\"\n ctx_wrapper: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n agent: Agent[Any]\n \"\"\"The agent performing the computer action.\"\"\"\n tool_call: ResponseComputerToolCall\n \"\"\"The computer tool call.\"\"\"\n safety_check: PendingSafetyCheck\n \"\"\"The pending safety check to acknowledge.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "MCPToolApprovalRequest", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class MCPToolApprovalRequest:\n \"\"\"A request to approve a tool call.\"\"\"\n ctx_wrapper: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n data: McpApprovalRequest\n \"\"\"The data from the MCP tool approval request.\"\"\"\nclass MCPToolApprovalFunctionResult(TypedDict):\n \"\"\"The result of an MCP tool approval function.\"\"\"\n approve: bool\n \"\"\"Whether to approve the tool call.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "MCPToolApprovalFunctionResult", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class MCPToolApprovalFunctionResult(TypedDict):\n \"\"\"The result of an MCP tool approval function.\"\"\"\n approve: bool\n \"\"\"Whether to approve the tool call.\"\"\"\n reason: NotRequired[str]\n \"\"\"An optional reason, if rejected.\"\"\"\nMCPToolApprovalFunction = Callable[\n [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]\n]\n\"\"\"A function that approves or rejects a tool call.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "HostedMCPTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class HostedMCPTool:\n \"\"\"A tool that allows the LLM to use a remote MCP server. 
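
The hosted tool dataclasses above are configured directly and attached to an agent. A sketch using WebSearchTool with its defaults and FileSearchTool against a hypothetical vector store id; as before, the tools= parameter on Agent is assumed from the wider SDK:

from agents import Agent, FileSearchTool, WebSearchTool

agent = Agent(
    name="Researcher",
    instructions="Search the web and the knowledge base before answering.",
    tools=[
        WebSearchTool(),  # defaults: no user_location, search_context_size="medium"
        FileSearchTool(vector_store_ids=["vs_example_123"], max_num_results=3),
    ],
)
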
The LLM will automatically list and\n call tools, without requiring a round trip back to your code.\n If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible\n environment, or you just prefer to run tool calls locally, then you can instead use the servers\n in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent.\"\"\"\n tool_config: Mcp\n \"\"\"The MCP tool config, which includes the server URL and other settings.\"\"\"\n on_approval_request: MCPToolApprovalFunction | None = None\n \"\"\"An optional function that will be called if approval is requested for an MCP tool. If not", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "CodeInterpreterTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class CodeInterpreterTool:\n \"\"\"A tool that allows the LLM to execute code in a sandboxed environment.\"\"\"\n tool_config: CodeInterpreter\n \"\"\"The tool config, which includes the container and other settings.\"\"\"\n @property\n def name(self):\n return \"code_interpreter\"\n@dataclass\nclass ImageGenerationTool:\n \"\"\"A tool that allows the LLM to generate images.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ImageGenerationTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class ImageGenerationTool:\n \"\"\"A tool that allows the LLM to generate images.\"\"\"\n tool_config: ImageGeneration\n \"\"\"The tool config, which image generation settings.\"\"\"\n @property\n def name(self):\n return \"image_generation\"\n@dataclass\nclass LocalShellCommandRequest:\n \"\"\"A request to execute a command on a shell.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "LocalShellCommandRequest", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class LocalShellCommandRequest:\n \"\"\"A request to execute a command on a shell.\"\"\"\n ctx_wrapper: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n data: LocalShellCall\n \"\"\"The data from the local shell tool call.\"\"\"\nLocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]\n\"\"\"A function that executes a command on a shell.\"\"\"\n@dataclass\nclass LocalShellTool:", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "LocalShellTool", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class LocalShellTool:\n \"\"\"A tool that allows the LLM to execute commands on a shell.\"\"\"\n executor: LocalShellExecutor\n \"\"\"A function that executes a command on a shell.\"\"\"\n @property\n def name(self):\n return \"local_shell\"\nTool = Union[\n FunctionTool,\n FileSearchTool,", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolRunFunction", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class ToolRunFunction:\n tool_call: ResponseFunctionToolCall\n function_tool: FunctionTool\n@dataclass\nclass ToolRunComputerAction:\n tool_call: ResponseComputerToolCall\n computer_tool: ComputerTool\nAction = Union[ToolRunFunction, ToolRunComputerAction]\n\"\"\"An action that can be performed by an agent. 
It contains the tool call and the tool\"\"\"\ndef default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolRunComputerAction", + "kind": 6, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "class ToolRunComputerAction:\n tool_call: ResponseComputerToolCall\n computer_tool: ComputerTool\nAction = Union[ToolRunFunction, ToolRunComputerAction]\n\"\"\"An action that can be performed by an agent. It contains the tool call and the tool\"\"\"\ndef default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:\n \"\"\"The default tool error function, which just returns a generic error message.\"\"\"\n return f\"An error occurred while running the tool. Please try again. Error: {str(error)}\"\nToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "default_tool_error_function", + "kind": 2, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:\n \"\"\"The default tool error function, which just returns a generic error message.\"\"\"\n return f\"An error occurred while running the tool. Please try again. Error: {str(error)}\"\nToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload\ndef function_tool(\n func: ToolFunction[...],\n *,\n name_override: str | None = None,\n description_override: str | None = None,", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "function_tool", + "kind": 2, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "def function_tool(\n func: ToolFunction[...],\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = None,\n strict_mode: bool = True,\n is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "function_tool", + "kind": 2, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "def function_tool(\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = None,\n strict_mode: bool = True,\n is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,\n) -> Callable[[ToolFunction[...]], FunctionTool]:", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "function_tool", + "kind": 2, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "def function_tool(\n func: ToolFunction[...] 
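
Action, defined just above, is a plain Union of ToolRunFunction and ToolRunComputerAction, so code that receives one narrows it with isinstance to reach either the function tool plus its call arguments or the computer tool. The helper below is illustrative, not part of the patch:

from agents.tool import Action, ToolRunComputerAction, ToolRunFunction

def describe_action(action: Action) -> str:
    # Hypothetical helper: summarize which tool is about to run.
    if isinstance(action, ToolRunFunction):
        return f"function tool {action.function_tool.name}({action.tool_call.arguments})"
    if isinstance(action, ToolRunComputerAction):
        return f"computer tool {action.computer_tool.name}"
    raise TypeError(f"unexpected action: {type(action)!r}")
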
| None = None,\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = default_tool_error_function,\n strict_mode: bool = True,\n is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolParams", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "ToolParams = ParamSpec(\"ToolParams\")\nToolFunctionWithoutContext = Callable[ToolParams, Any]\nToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]\nToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolFunctionWithoutContext", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "ToolFunctionWithoutContext = Callable[ToolParams, Any]\nToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]\nToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolFunctionWithContext", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]\nToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:\n tool: FunctionTool", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolFunctionWithToolContext", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "ToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:\n tool: FunctionTool\n \"\"\"The tool that was run.\"\"\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolFunction", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "ToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:\n tool: FunctionTool\n \"\"\"The tool that was run.\"\"\"\n output: Any", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "MCPToolApprovalFunction", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "MCPToolApprovalFunction = Callable[\n [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]\n]\n\"\"\"A function that approves 
or rejects a tool call.\"\"\"\n@dataclass\nclass HostedMCPTool:\n \"\"\"A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and\n call tools, without requiring a round trip back to your code.\n If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible\n environment, or you just prefer to run tool calls locally, then you can instead use the servers", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "LocalShellExecutor", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]\n\"\"\"A function that executes a command on a shell.\"\"\"\n@dataclass\nclass LocalShellTool:\n \"\"\"A tool that allows the LLM to execute commands on a shell.\"\"\"\n executor: LocalShellExecutor\n \"\"\"A function that executes a command on a shell.\"\"\"\n @property\n def name(self):\n return \"local_shell\"", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "Tool", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "Tool = Union[\n FunctionTool,\n FileSearchTool,\n WebSearchTool,\n ComputerTool,\n HostedMCPTool,\n LocalShellTool,\n ImageGenerationTool,\n CodeInterpreterTool,\n]", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "Action", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "Action = Union[ToolRunFunction, ToolRunComputerAction]\n\"\"\"An action that can be performed by an agent. It contains the tool call and the tool\"\"\"\ndef default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:\n \"\"\"The default tool error function, which just returns a generic error message.\"\"\"\n return f\"An error occurred while running the tool. Please try again. 
Error: {str(error)}\"\nToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload\ndef function_tool(\n func: ToolFunction[...],\n *,", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolErrorFunction", + "kind": 5, + "importPath": "src.agents.tool", + "description": "src.agents.tool", + "peekOfCode": "ToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload\ndef function_tool(\n func: ToolFunction[...],\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = None,", + "detail": "src.agents.tool", + "documentation": {} + }, + { + "label": "ToolContext", + "kind": 6, + "importPath": "src.agents.tool_context", + "description": "src.agents.tool_context", + "peekOfCode": "class ToolContext(RunContextWrapper[TContext]):\n \"\"\"The context of a tool call.\"\"\"\n tool_name: str = field(default_factory=_assert_must_pass_tool_name)\n \"\"\"The name of the tool being invoked.\"\"\"\n tool_call_id: str = field(default_factory=_assert_must_pass_tool_call_id)\n \"\"\"The ID of the tool call.\"\"\"\n @classmethod\n def from_agent_context(\n cls,\n context: RunContextWrapper[TContext],", + "detail": "src.agents.tool_context", + "documentation": {} + }, + { + "label": "Usage", + "kind": 6, + "importPath": "src.agents.usage", + "description": "src.agents.usage", + "peekOfCode": "class Usage:\n requests: int = 0\n \"\"\"Total requests made to the LLM API.\"\"\"\n input_tokens: int = 0\n \"\"\"Total input tokens sent, across all requests.\"\"\"\n input_tokens_details: InputTokensDetails = field(\n default_factory=lambda: InputTokensDetails(cached_tokens=0)\n )\n \"\"\"Details about the input tokens, matching responses API usage details.\"\"\"\n output_tokens: int = 0", + "detail": "src.agents.usage", + "documentation": {} + }, + { + "label": "StreamHandler", + "kind": 6, + "importPath": "tests.fastapi.streaming_app", + "description": "tests.fastapi.streaming_app", + "peekOfCode": "class StreamHandler:\n def __init__(self, result: RunResultStreaming):\n self.result = result\n async def stream_events(self) -> AsyncIterator[str]:\n async for event in self.result.stream_events():\n yield f\"{event.type}\\n\\n\"", + "detail": "tests.fastapi.streaming_app", + "documentation": {} + }, + { + "label": "agent", + "kind": 5, + "importPath": "tests.fastapi.streaming_app", + "description": "tests.fastapi.streaming_app", + "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",\n)\napp = FastAPI()\n@app.post(\"/stream\")\nasync def stream():\n result = Runner.run_streamed(agent, input=\"Tell me a joke\")\n stream_handler = StreamHandler(result)\n return StreamingResponse(stream_handler.stream_events(), media_type=\"application/x-ndjson\")", + "detail": "tests.fastapi.streaming_app", + "documentation": {} + }, + { + "label": "app", + "kind": 5, + "importPath": "tests.fastapi.streaming_app", + "description": "tests.fastapi.streaming_app", + "peekOfCode": "app = FastAPI()\n@app.post(\"/stream\")\nasync def stream():\n result = Runner.run_streamed(agent, input=\"Tell me a joke\")\n stream_handler = StreamHandler(result)\n return StreamingResponse(stream_handler.stream_events(), media_type=\"application/x-ndjson\")\nclass StreamHandler:\n def __init__(self, result: RunResultStreaming):\n self.result = result\n 
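
The function_tool overloads above accept a failure_error_function with the ToolErrorFunction shape; when the wrapped function raises, its return value is sent back to the model instead of the generic message from default_tool_error_function. A sketch with an illustrative divide tool:

from typing import Any

from agents import RunContextWrapper, function_tool

def explain_error(ctx: RunContextWrapper[Any], error: Exception) -> str:
    # Same shape as ToolErrorFunction above.
    return f"The tool failed ({error}). Double-check the inputs and try again."

@function_tool(failure_error_function=explain_error)
def divide(a: float, b: float) -> float:
    """Divide a by b."""
    return a / b
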
async def stream_events(self) -> AsyncIterator[str]:", + "detail": "tests.fastapi.streaming_app", + "documentation": {} + }, + { + "label": "pytest_ignore_collect", + "kind": 2, + "importPath": "tests.mcp.conftest", + "description": "tests.mcp.conftest", + "peekOfCode": "def pytest_ignore_collect(collection_path, config):\n if sys.version_info[:2] == (3, 9):\n this_dir = os.path.dirname(__file__)\n if str(collection_path).startswith(this_dir):\n return True", + "detail": "tests.mcp.conftest", + "documentation": {} + }, + { + "label": "DummyStream", + "kind": 6, + "importPath": "tests.mcp.helpers", + "description": "tests.mcp.helpers", + "peekOfCode": "class DummyStream:\n async def send(self, msg):\n pass\n async def receive(self):\n raise Exception(\"Dummy receive not implemented\")\nclass DummyStreamsContextManager:\n async def __aenter__(self):\n return (DummyStream(), DummyStream())\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n pass", + "detail": "tests.mcp.helpers", + "documentation": {} + }, + { + "label": "DummyStreamsContextManager", + "kind": 6, + "importPath": "tests.mcp.helpers", + "description": "tests.mcp.helpers", + "peekOfCode": "class DummyStreamsContextManager:\n async def __aenter__(self):\n return (DummyStream(), DummyStream())\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n pass\nclass _TestFilterServer(_MCPServerWithClientSession):\n \"\"\"Minimal implementation of _MCPServerWithClientSession for testing tool filtering\"\"\"\n def __init__(self, tool_filter: ToolFilter, server_name: str):\n # Initialize parent class properly to avoid type errors\n super().__init__(", + "detail": "tests.mcp.helpers", + "documentation": {} + }, + { + "label": "_TestFilterServer", + "kind": 6, + "importPath": "tests.mcp.helpers", + "description": "tests.mcp.helpers", + "peekOfCode": "class _TestFilterServer(_MCPServerWithClientSession):\n \"\"\"Minimal implementation of _MCPServerWithClientSession for testing tool filtering\"\"\"\n def __init__(self, tool_filter: ToolFilter, server_name: str):\n # Initialize parent class properly to avoid type errors\n super().__init__(\n cache_tools_list=False,\n client_session_timeout_seconds=None,\n tool_filter=tool_filter,\n )\n self._server_name: str = server_name", + "detail": "tests.mcp.helpers", + "documentation": {} + }, + { + "label": "FakeMCPServer", + "kind": 6, + "importPath": "tests.mcp.helpers", + "description": "tests.mcp.helpers", + "peekOfCode": "class FakeMCPServer(MCPServer):\n def __init__(\n self,\n tools: list[MCPTool] | None = None,\n tool_filter: ToolFilter = None,\n server_name: str = \"fake_mcp_server\",\n ):\n self.tools: list[MCPTool] = tools or []\n self.tool_calls: list[str] = []\n self.tool_results: list[str] = []", + "detail": "tests.mcp.helpers", + "documentation": {} + }, + { + "label": "tee", + "kind": 5, + "importPath": "tests.mcp.helpers", + "description": "tests.mcp.helpers", + "peekOfCode": "tee = shutil.which(\"tee\") or \"\"\nassert tee, \"tee not found\"\n# Added dummy stream classes for patching stdio_client to avoid real I/O during tests\nclass DummyStream:\n async def send(self, msg):\n pass\n async def receive(self):\n raise Exception(\"Dummy receive not implemented\")\nclass DummyStreamsContextManager:\n async def __aenter__(self):", + "detail": "tests.mcp.helpers", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.mcp.test_mcp_util", + "description": "tests.mcp.test_mcp_util", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n baz: int\nclass 
Bar(BaseModel):\n qux: dict[str, str]\nBaz = TypeAdapter(dict[str, str])\ndef _convertible_schema() -> dict[str, Any]:\n schema = Foo.model_json_schema()\n schema[\"additionalProperties\"] = False\n return schema", + "detail": "tests.mcp.test_mcp_util", + "documentation": {} + }, + { + "label": "Bar", + "kind": 6, + "importPath": "tests.mcp.test_mcp_util", + "description": "tests.mcp.test_mcp_util", + "peekOfCode": "class Bar(BaseModel):\n qux: dict[str, str]\nBaz = TypeAdapter(dict[str, str])\ndef _convertible_schema() -> dict[str, Any]:\n schema = Foo.model_json_schema()\n schema[\"additionalProperties\"] = False\n return schema\n@pytest.mark.asyncio\nasync def test_get_all_function_tools():\n \"\"\"Test that the get_all_function_tools function returns all function tools from a list of MCP", + "detail": "tests.mcp.test_mcp_util", + "documentation": {} + }, + { + "label": "CrashingFakeMCPServer", + "kind": 6, + "importPath": "tests.mcp.test_mcp_util", + "description": "tests.mcp.test_mcp_util", + "peekOfCode": "class CrashingFakeMCPServer(FakeMCPServer):\n async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None):\n raise Exception(\"Crash!\")\n@pytest.mark.asyncio\nasync def test_mcp_invocation_crash_causes_error(caplog: pytest.LogCaptureFixture):\n caplog.set_level(logging.DEBUG)\n \"\"\"Test that bad JSON input errors are logged and re-raised.\"\"\"\n server = CrashingFakeMCPServer()\n server.add_tool(\"test_tool_1\", {})\n ctx = RunContextWrapper(context=None)", + "detail": "tests.mcp.test_mcp_util", + "documentation": {} + }, + { + "label": "Baz", + "kind": 5, + "importPath": "tests.mcp.test_mcp_util", + "description": "tests.mcp.test_mcp_util", + "peekOfCode": "Baz = TypeAdapter(dict[str, str])\ndef _convertible_schema() -> dict[str, Any]:\n schema = Foo.model_json_schema()\n schema[\"additionalProperties\"] = False\n return schema\n@pytest.mark.asyncio\nasync def test_get_all_function_tools():\n \"\"\"Test that the get_all_function_tools function returns all function tools from a list of MCP\n servers.\n \"\"\"", + "detail": "tests.mcp.test_mcp_util", + "documentation": {} + }, + { + "label": "FakeMCPPromptServer", + "kind": 6, + "importPath": "tests.mcp.test_prompt_server", + "description": "tests.mcp.test_prompt_server", + "peekOfCode": "class FakeMCPPromptServer(MCPServer):\n \"\"\"Fake MCP server for testing prompt functionality\"\"\"\n def __init__(self, server_name: str = \"fake_prompt_server\"):\n self.prompts: list[Any] = []\n self.prompt_results: dict[str, str] = {}\n self._server_name = server_name\n def add_prompt(self, name: str, description: str, arguments: dict[str, Any] | None = None):\n \"\"\"Add a prompt to the fake server\"\"\"\n from mcp.types import Prompt\n prompt = Prompt(name=name, description=description, arguments=[])", + "detail": "tests.mcp.test_prompt_server", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.mcp.test_runner_calls_mcp", + "description": "tests.mcp.test_runner_calls_mcp", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n baz: int\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"streaming\", [False, True])\nasync def test_runner_calls_mcp_tool_with_args(streaming: bool):\n \"\"\"Test that the runner calls an MCP tool when the model produces a tool call.\"\"\"\n server = FakeMCPServer()\n await server.connect()\n server.add_tool(\"test_tool_1\", {})", + "detail": "tests.mcp.test_runner_calls_mcp", + "documentation": {} + }, + { + "label": "CrashingClientSessionServer", + "kind": 6, + 
"importPath": "tests.mcp.test_server_errors", + "description": "tests.mcp.test_server_errors", + "peekOfCode": "class CrashingClientSessionServer(_MCPServerWithClientSession):\n def __init__(self):\n super().__init__(cache_tools_list=False, client_session_timeout_seconds=5)\n self.cleanup_called = False\n def create_streams(self):\n raise ValueError(\"Crash!\")\n async def cleanup(self):\n self.cleanup_called = True\n await super().cleanup()\n @property", + "detail": "tests.mcp.test_server_errors", + "documentation": {} + }, + { + "label": "create_test_agent", + "kind": 2, + "importPath": "tests.mcp.test_tool_filtering", + "description": "tests.mcp.test_tool_filtering", + "peekOfCode": "def create_test_agent(name: str = \"test_agent\") -> Agent:\n \"\"\"Create a test agent for filtering tests.\"\"\"\n return Agent(name=name, instructions=\"Test agent\")\ndef create_test_context() -> RunContextWrapper:\n \"\"\"Create a test run context for filtering tests.\"\"\"\n return RunContextWrapper(context=None)\n# === Static Tool Filtering Tests ===\n@pytest.mark.asyncio\nasync def test_static_tool_filtering():\n \"\"\"Test all static tool filtering scenarios: allowed, blocked, both, none, etc.\"\"\"", + "detail": "tests.mcp.test_tool_filtering", + "documentation": {} + }, + { + "label": "create_test_context", + "kind": 2, + "importPath": "tests.mcp.test_tool_filtering", + "description": "tests.mcp.test_tool_filtering", + "peekOfCode": "def create_test_context() -> RunContextWrapper:\n \"\"\"Create a test run context for filtering tests.\"\"\"\n return RunContextWrapper(context=None)\n# === Static Tool Filtering Tests ===\n@pytest.mark.asyncio\nasync def test_static_tool_filtering():\n \"\"\"Test all static tool filtering scenarios: allowed, blocked, both, none, etc.\"\"\"\n server = FakeMCPServer(server_name=\"test_server\")\n server.add_tool(\"tool1\", {})\n server.add_tool(\"tool2\", {})", + "detail": "tests.mcp.test_tool_filtering", + "documentation": {} + }, + { + "label": "verify_serialization", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def verify_serialization(model_settings: ModelSettings) -> None:\n \"\"\"Verify that ModelSettings can be serialized to a JSON string.\"\"\"\n json_dict = model_settings.to_json_dict()\n json_string = json.dumps(json_dict)\n assert json_string is not None\ndef test_basic_serialization() -> None:\n \"\"\"Tests whether ModelSettings can be serialized to a JSON string.\"\"\"\n # First, lets create a ModelSettings instance\n model_settings = ModelSettings(\n temperature=0.5,", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_basic_serialization", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_basic_serialization() -> None:\n \"\"\"Tests whether ModelSettings can be serialized to a JSON string.\"\"\"\n # First, lets create a ModelSettings instance\n model_settings = ModelSettings(\n temperature=0.5,\n top_p=0.9,\n max_tokens=100,\n )\n # Now, lets serialize the ModelSettings instance to a JSON string\n verify_serialization(model_settings)", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_mcp_tool_choice_serialization", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": 
"tests.model_settings.test_serialization", + "peekOfCode": "def test_mcp_tool_choice_serialization() -> None:\n \"\"\"Tests whether ModelSettings with MCPToolChoice can be serialized to a JSON string.\"\"\"\n # First, lets create a ModelSettings instance\n model_settings = ModelSettings(\n temperature=0.5,\n tool_choice=MCPToolChoice(server_label=\"mcp\", name=\"mcp_tool\"),\n )\n # Now, lets serialize the ModelSettings instance to a JSON string\n verify_serialization(model_settings)\ndef test_all_fields_serialization() -> None:", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_all_fields_serialization", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_all_fields_serialization() -> None:\n \"\"\"Tests whether ModelSettings can be serialized to a JSON string.\"\"\"\n # First, lets create a ModelSettings instance\n model_settings = ModelSettings(\n temperature=0.5,\n top_p=0.9,\n frequency_penalty=0.0,\n presence_penalty=0.0,\n tool_choice=\"auto\",\n parallel_tool_calls=True,", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_extra_args_serialization", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_extra_args_serialization() -> None:\n \"\"\"Test that extra_args are properly serialized.\"\"\"\n model_settings = ModelSettings(\n temperature=0.5,\n extra_args={\"custom_param\": \"value\", \"another_param\": 42, \"nested\": {\"key\": \"value\"}},\n )\n json_dict = model_settings.to_json_dict()\n assert json_dict[\"extra_args\"] == {\n \"custom_param\": \"value\",\n \"another_param\": 42,", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_extra_args_resolve", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_extra_args_resolve() -> None:\n \"\"\"Test that extra_args are properly merged in the resolve method.\"\"\"\n base_settings = ModelSettings(\n temperature=0.5, extra_args={\"param1\": \"base_value\", \"param2\": \"base_only\"}\n )\n override_settings = ModelSettings(\n top_p=0.9, extra_args={\"param1\": \"override_value\", \"param3\": \"override_only\"}\n )\n resolved = base_settings.resolve(override_settings)\n # Check that regular fields are properly resolved", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_extra_args_resolve_with_none", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_extra_args_resolve_with_none() -> None:\n \"\"\"Test that resolve works properly when one side has None extra_args.\"\"\"\n # Base with extra_args, override with None\n base_settings = ModelSettings(extra_args={\"param1\": \"value1\"})\n override_settings = ModelSettings(temperature=0.8)\n resolved = base_settings.resolve(override_settings)\n assert resolved.extra_args == {\"param1\": \"value1\"}\n assert resolved.temperature == 0.8\n # Base with None, override with extra_args\n base_settings = ModelSettings(temperature=0.5)", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_extra_args_resolve_both_none", + 
"kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_extra_args_resolve_both_none() -> None:\n \"\"\"Test that resolve works when both sides have None extra_args.\"\"\"\n base_settings = ModelSettings(temperature=0.5)\n override_settings = ModelSettings(top_p=0.9)\n resolved = base_settings.resolve(override_settings)\n assert resolved.extra_args is None\n assert resolved.temperature == 0.5\n assert resolved.top_p == 0.9\ndef test_pydantic_serialization() -> None:\n \"\"\"Tests whether ModelSettings can be serialized with Pydantic.\"\"\"", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "test_pydantic_serialization", + "kind": 2, + "importPath": "tests.model_settings.test_serialization", + "description": "tests.model_settings.test_serialization", + "peekOfCode": "def test_pydantic_serialization() -> None:\n \"\"\"Tests whether ModelSettings can be serialized with Pydantic.\"\"\"\n # First, lets create a ModelSettings instance\n model_settings = ModelSettings(\n temperature=0.5,\n top_p=0.9,\n frequency_penalty=0.0,\n presence_penalty=0.0,\n tool_choice=\"auto\",\n parallel_tool_calls=True,", + "detail": "tests.model_settings.test_serialization", + "documentation": {} + }, + { + "label": "pytest_ignore_collect", + "kind": 2, + "importPath": "tests.models.conftest", + "description": "tests.models.conftest", + "peekOfCode": "def pytest_ignore_collect(collection_path, config):\n if sys.version_info[:2] == (3, 9):\n this_dir = os.path.dirname(__file__)\n if str(collection_path).startswith(this_dir):\n return True", + "detail": "tests.models.conftest", + "documentation": {} + }, + { + "label": "test_no_prefix_is_openai", + "kind": 2, + "importPath": "tests.models.test_map", + "description": "tests.models.test_map", + "peekOfCode": "def test_no_prefix_is_openai():\n agent = Agent(model=\"gpt-4o\", instructions=\"\", name=\"test\")\n model = AgentRunner._get_model(agent, RunConfig())\n assert isinstance(model, OpenAIResponsesModel)\ndef openai_prefix_is_openai():\n agent = Agent(model=\"openai/gpt-4o\", instructions=\"\", name=\"test\")\n model = AgentRunner._get_model(agent, RunConfig())\n assert isinstance(model, OpenAIResponsesModel)\ndef test_litellm_prefix_is_litellm():\n agent = Agent(model=\"litellm/foo/bar\", instructions=\"\", name=\"test\")", + "detail": "tests.models.test_map", + "documentation": {} + }, + { + "label": "openai_prefix_is_openai", + "kind": 2, + "importPath": "tests.models.test_map", + "description": "tests.models.test_map", + "peekOfCode": "def openai_prefix_is_openai():\n agent = Agent(model=\"openai/gpt-4o\", instructions=\"\", name=\"test\")\n model = AgentRunner._get_model(agent, RunConfig())\n assert isinstance(model, OpenAIResponsesModel)\ndef test_litellm_prefix_is_litellm():\n agent = Agent(model=\"litellm/foo/bar\", instructions=\"\", name=\"test\")\n model = AgentRunner._get_model(agent, RunConfig())\n assert isinstance(model, LitellmModel)", + "detail": "tests.models.test_map", + "documentation": {} + }, + { + "label": "test_litellm_prefix_is_litellm", + "kind": 2, + "importPath": "tests.models.test_map", + "description": "tests.models.test_map", + "peekOfCode": "def test_litellm_prefix_is_litellm():\n agent = Agent(model=\"litellm/foo/bar\", instructions=\"\", name=\"test\")\n model = AgentRunner._get_model(agent, RunConfig())\n assert isinstance(model, LitellmModel)", + "detail": "tests.models.test_map", + 
"documentation": {} + }, + { + "label": "test_can_initialize_realtime_agent", + "kind": 2, + "importPath": "tests.realtime.test_agent", + "description": "tests.realtime.test_agent", + "peekOfCode": "def test_can_initialize_realtime_agent():\n agent = RealtimeAgent(name=\"test\", instructions=\"Hello\")\n assert agent.name == \"test\"\n assert agent.instructions == \"Hello\"\n@pytest.mark.asyncio\nasync def test_dynamic_instructions():\n agent = RealtimeAgent(name=\"test\")\n assert agent.instructions is None\n def _instructions(ctx, agt) -> str:\n assert ctx.context is None", + "detail": "tests.realtime.test_agent", + "documentation": {} + }, + { + "label": "TestConversionHelperTryConvertRawMessage", + "kind": 6, + "importPath": "tests.realtime.test_conversion_helpers", + "description": "tests.realtime.test_conversion_helpers", + "peekOfCode": "class TestConversionHelperTryConvertRawMessage:\n \"\"\"Test suite for _ConversionHelper.try_convert_raw_message method.\"\"\"\n def test_try_convert_raw_message_valid_session_update(self):\n \"\"\"Test converting a valid session.update raw message.\"\"\"\n raw_message = RealtimeModelSendRawMessage(\n message={\n \"type\": \"session.update\",\n \"other_data\": {\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],", + "detail": "tests.realtime.test_conversion_helpers", + "documentation": {} + }, + { + "label": "TestConversionHelperTracingConfig", + "kind": 6, + "importPath": "tests.realtime.test_conversion_helpers", + "description": "tests.realtime.test_conversion_helpers", + "peekOfCode": "class TestConversionHelperTracingConfig:\n \"\"\"Test suite for _ConversionHelper.convert_tracing_config method.\"\"\"\n def test_convert_tracing_config_none(self):\n \"\"\"Test converting None tracing config.\"\"\"\n result = _ConversionHelper.convert_tracing_config(None)\n assert result is None\n def test_convert_tracing_config_auto(self):\n \"\"\"Test converting 'auto' tracing config.\"\"\"\n result = _ConversionHelper.convert_tracing_config(\"auto\")\n assert result == \"auto\"", + "detail": "tests.realtime.test_conversion_helpers", + "documentation": {} + }, + { + "label": "TestConversionHelperUserInput", + "kind": 6, + "importPath": "tests.realtime.test_conversion_helpers", + "description": "tests.realtime.test_conversion_helpers", + "peekOfCode": "class TestConversionHelperUserInput:\n \"\"\"Test suite for _ConversionHelper user input conversion methods.\"\"\"\n def test_convert_user_input_to_conversation_item_string(self):\n \"\"\"Test converting string user input to conversation item.\"\"\"\n event = RealtimeModelSendUserInput(user_input=\"Hello, world!\")\n result = _ConversionHelper.convert_user_input_to_conversation_item(event)\n assert isinstance(result, ConversationItem)\n assert result.type == \"message\"\n assert result.role == \"user\"\n assert result.content is not None", + "detail": "tests.realtime.test_conversion_helpers", + "documentation": {} + }, + { + "label": "TestConversionHelperAudio", + "kind": 6, + "importPath": "tests.realtime.test_conversion_helpers", + "description": "tests.realtime.test_conversion_helpers", + "peekOfCode": "class TestConversionHelperAudio:\n \"\"\"Test suite for _ConversionHelper.convert_audio_to_input_audio_buffer_append.\"\"\"\n def test_convert_audio_to_input_audio_buffer_append(self):\n \"\"\"Test converting audio data to input audio buffer append event.\"\"\"\n audio_data = b\"test audio data\"\n event = RealtimeModelSendAudio(audio=audio_data, commit=False)\n result = 
_ConversionHelper.convert_audio_to_input_audio_buffer_append(event)\n assert isinstance(result, InputAudioBufferAppendEvent)\n assert result.type == \"input_audio_buffer.append\"\n # Verify base64 encoding", + "detail": "tests.realtime.test_conversion_helpers", + "documentation": {} + }, + { + "label": "TestConversionHelperToolOutput", + "kind": 6, + "importPath": "tests.realtime.test_conversion_helpers", + "description": "tests.realtime.test_conversion_helpers", + "peekOfCode": "class TestConversionHelperToolOutput:\n \"\"\"Test suite for _ConversionHelper.convert_tool_output method.\"\"\"\n def test_convert_tool_output(self):\n \"\"\"Test converting tool output to conversation item create event.\"\"\"\n mock_tool_call = Mock()\n mock_tool_call.call_id = \"call_123\"\n event = RealtimeModelSendToolOutput(\n tool_call=mock_tool_call,\n output=\"Function executed successfully\",\n start_response=False,", + "detail": "tests.realtime.test_conversion_helpers", + "documentation": {} + }, + { + "label": "TestConversionHelperInterrupt", + "kind": 6, + "importPath": "tests.realtime.test_conversion_helpers", + "description": "tests.realtime.test_conversion_helpers", + "peekOfCode": "class TestConversionHelperInterrupt:\n \"\"\"Test suite for _ConversionHelper.convert_interrupt method.\"\"\"\n def test_convert_interrupt(self):\n \"\"\"Test converting interrupt parameters to conversation item truncate event.\"\"\"\n current_item_id = \"item_789\"\n current_audio_content_index = 2\n elapsed_time_ms = 1500\n result = _ConversionHelper.convert_interrupt(\n current_item_id, current_audio_content_index, elapsed_time_ms\n )", + "detail": "tests.realtime.test_conversion_helpers", + "documentation": {} + }, + { + "label": "test_user_message_conversion", + "kind": 2, + "importPath": "tests.realtime.test_item_parsing", + "description": "tests.realtime.test_item_parsing", + "peekOfCode": "def test_user_message_conversion() -> None:\n item = ConversationItem(\n id=\"123\",\n type=\"message\",\n role=\"user\",\n content=[\n ConversationItemContent(\n id=None, audio=None, text=None, transcript=None, type=\"input_text\"\n )\n ],", + "detail": "tests.realtime.test_item_parsing", + "documentation": {} + }, + { + "label": "test_assistant_message_conversion", + "kind": 2, + "importPath": "tests.realtime.test_item_parsing", + "description": "tests.realtime.test_item_parsing", + "peekOfCode": "def test_assistant_message_conversion() -> None:\n item = ConversationItem(\n id=\"123\",\n type=\"message\",\n role=\"assistant\",\n content=[\n ConversationItemContent(id=None, audio=None, text=None, transcript=None, type=\"text\")\n ],\n )\n converted: RealtimeMessageItem = _ConversionHelper.conversation_item_to_realtime_message_item(", + "detail": "tests.realtime.test_item_parsing", + "documentation": {} + }, + { + "label": "test_system_message_conversion", + "kind": 2, + "importPath": "tests.realtime.test_item_parsing", + "description": "tests.realtime.test_item_parsing", + "peekOfCode": "def test_system_message_conversion() -> None:\n item = ConversationItem(\n id=\"123\",\n type=\"message\",\n role=\"system\",\n content=[\n ConversationItemContent(\n id=None, audio=None, text=None, transcript=None, type=\"input_text\"\n )\n ],", + "detail": "tests.realtime.test_item_parsing", + "documentation": {} + }, + { + "label": "test_all_events_have_type", + "kind": 2, + "importPath": "tests.realtime.test_model_events", + "description": "tests.realtime.test_model_events", + "peekOfCode": "def test_all_events_have_type() -> None:\n 
\"\"\"Test that all events have a type.\"\"\"\n events = get_args(RealtimeModelEvent)\n assert len(events) > 0\n for event in events:\n assert event.type is not None\n assert isinstance(event.type, str)", + "detail": "tests.realtime.test_model_events", + "documentation": {} + }, + { + "label": "TestOpenAIRealtimeWebSocketModel", + "kind": 6, + "importPath": "tests.realtime.test_openai_realtime", + "description": "tests.realtime.test_openai_realtime", + "peekOfCode": "class TestOpenAIRealtimeWebSocketModel:\n \"\"\"Test suite for OpenAIRealtimeWebSocketModel connection and event handling.\"\"\"\n @pytest.fixture\n def model(self):\n \"\"\"Create a fresh model instance for each test.\"\"\"\n return OpenAIRealtimeWebSocketModel()\n @pytest.fixture\n def mock_websocket(self):\n \"\"\"Create a mock websocket connection.\"\"\"\n mock_ws = AsyncMock()", + "detail": "tests.realtime.test_openai_realtime", + "documentation": {} + }, + { + "label": "TestConnectionLifecycle", + "kind": 6, + "importPath": "tests.realtime.test_openai_realtime", + "description": "tests.realtime.test_openai_realtime", + "peekOfCode": "class TestConnectionLifecycle(TestOpenAIRealtimeWebSocketModel):\n \"\"\"Test connection establishment, configuration, and error handling.\"\"\"\n @pytest.mark.asyncio\n async def test_connect_missing_api_key_raises_error(self, model):\n \"\"\"Test that missing API key raises UserError.\"\"\"\n config: dict[str, Any] = {\"initial_model_settings\": {}}\n with patch.dict(\"os.environ\", {}, clear=True):\n with pytest.raises(UserError, match=\"API key is required\"):\n await model.connect(config)\n @pytest.mark.asyncio", + "detail": "tests.realtime.test_openai_realtime", + "documentation": {} + }, + { + "label": "TestEventHandlingRobustness", + "kind": 6, + "importPath": "tests.realtime.test_openai_realtime", + "description": "tests.realtime.test_openai_realtime", + "peekOfCode": "class TestEventHandlingRobustness(TestOpenAIRealtimeWebSocketModel):\n \"\"\"Test event parsing, validation, and error handling robustness.\"\"\"\n @pytest.mark.asyncio\n async def test_handle_malformed_json_logs_error_continues(self, model):\n \"\"\"Test that malformed JSON emits error event but doesn't crash.\"\"\"\n mock_listener = AsyncMock()\n model.add_listener(mock_listener)\n # Malformed JSON should not crash the handler\n await model._handle_ws_event(\"invalid json {\")\n # Should emit error event to listeners", + "detail": "tests.realtime.test_openai_realtime", + "documentation": {} + }, + { + "label": "MockRealtimeModel", + "kind": 6, + "importPath": "tests.realtime.test_runner", + "description": "tests.realtime.test_runner", + "peekOfCode": "class MockRealtimeModel(RealtimeModel):\n async def connect(self, options=None):\n pass\n def add_listener(self, listener):\n pass\n def remove_listener(self, listener):\n pass\n async def send_event(self, event):\n pass\n async def send_message(self, message, other_event_data=None):", + "detail": "tests.realtime.test_runner", + "documentation": {} + }, + { + "label": "mock_agent", + "kind": 2, + "importPath": "tests.realtime.test_runner", + "description": "tests.realtime.test_runner", + "peekOfCode": "def mock_agent():\n agent = Mock(spec=RealtimeAgent)\n agent.get_system_prompt = AsyncMock(return_value=\"Test instructions\")\n agent.get_all_tools = AsyncMock(return_value=[{\"type\": \"function\", \"name\": \"test_tool\"}])\n return agent\n@pytest.fixture\ndef mock_model():\n return MockRealtimeModel()\n@pytest.mark.asyncio\nasync def 
test_run_creates_session_with_no_settings(mock_agent, mock_model):", + "detail": "tests.realtime.test_runner", + "documentation": {} + }, + { + "label": "mock_model", + "kind": 2, + "importPath": "tests.realtime.test_runner", + "description": "tests.realtime.test_runner", + "peekOfCode": "def mock_model():\n return MockRealtimeModel()\n@pytest.mark.asyncio\nasync def test_run_creates_session_with_no_settings(mock_agent, mock_model):\n \"\"\"Test that run() creates a session correctly if no settings are provided\"\"\"\n runner = RealtimeRunner(mock_agent, model=mock_model)\n with patch(\"agents.realtime.runner.RealtimeSession\") as mock_session_class:\n mock_session = Mock(spec=RealtimeSession)\n mock_session_class.return_value = mock_session\n session = await runner.run()", + "detail": "tests.realtime.test_runner", + "documentation": {} + }, + { + "label": "MockRealtimeModel", + "kind": 6, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "class MockRealtimeModel(RealtimeModel):\n def __init__(self):\n super().__init__()\n self.listeners = []\n self.connect_called = False\n self.close_called = False\n self.sent_events = []\n # Legacy tracking for tests that haven't been updated yet\n self.sent_messages = []\n self.sent_audio = []", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "TestEventHandling", + "kind": 6, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "class TestEventHandling:\n \"\"\"Test suite for event handling and transformation in RealtimeSession.on_event\"\"\"\n @pytest.mark.asyncio\n async def test_error_event_transformation(self, mock_model, mock_agent):\n \"\"\"Test that error events are properly transformed and queued\"\"\"\n session = RealtimeSession(mock_model, mock_agent, None)\n error_event = RealtimeModelErrorEvent(error=\"Test error\")\n await session.on_event(error_event)\n # Check that events were queued\n assert session._event_queue.qsize() == 2", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "TestHistoryManagement", + "kind": 6, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "class TestHistoryManagement:\n \"\"\"Test suite for history management and audio transcription in\n RealtimeSession._get_new_history\"\"\"\n def test_merge_transcript_into_existing_audio_message(self):\n \"\"\"Test merging audio transcript into existing placeholder input_audio message\"\"\"\n # Create initial history with audio message without transcript\n initial_item = UserMessageItem(\n item_id=\"item_1\",\n role=\"user\",\n content=[", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "TestToolCallExecution", + "kind": 6, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "class TestToolCallExecution:\n \"\"\"Test suite for tool call execution flow in RealtimeSession._handle_tool_call\"\"\"\n @pytest.mark.asyncio\n async def test_function_tool_execution_success(\n self, mock_model, mock_agent, mock_function_tool\n ):\n \"\"\"Test successful function tool execution\"\"\"\n # Set up agent to return our mock tool\n mock_agent.get_all_tools.return_value = [mock_function_tool]\n session = RealtimeSession(mock_model, mock_agent, None)", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": 
"TestGuardrailFunctionality", + "kind": 6, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "class TestGuardrailFunctionality:\n \"\"\"Test suite for output guardrail functionality in RealtimeSession\"\"\"\n async def _wait_for_guardrail_tasks(self, session):\n \"\"\"Wait for all pending guardrail tasks to complete.\"\"\"\n import asyncio\n if session._guardrail_tasks:\n await asyncio.gather(*session._guardrail_tasks, return_exceptions=True)\n @pytest.fixture\n def triggered_guardrail(self):\n \"\"\"Creates a guardrail that always triggers\"\"\"", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "mock_agent", + "kind": 2, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "def mock_agent():\n agent = Mock(spec=RealtimeAgent)\n agent.get_all_tools = AsyncMock(return_value=[])\n return agent\n@pytest.fixture\ndef mock_model():\n return MockRealtimeModel()\n@pytest.fixture\ndef mock_function_tool():\n tool = Mock(spec=FunctionTool)", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "mock_model", + "kind": 2, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "def mock_model():\n return MockRealtimeModel()\n@pytest.fixture\ndef mock_function_tool():\n tool = Mock(spec=FunctionTool)\n tool.name = \"test_function\"\n tool.on_invoke_tool = AsyncMock(return_value=\"function_result\")\n return tool\n@pytest.fixture\ndef mock_handoff():", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "mock_function_tool", + "kind": 2, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "def mock_function_tool():\n tool = Mock(spec=FunctionTool)\n tool.name = \"test_function\"\n tool.on_invoke_tool = AsyncMock(return_value=\"function_result\")\n return tool\n@pytest.fixture\ndef mock_handoff():\n handoff = Mock(spec=Handoff)\n handoff.name = \"test_handoff\"\n return handoff", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "mock_handoff", + "kind": 2, + "importPath": "tests.realtime.test_session", + "description": "tests.realtime.test_session", + "peekOfCode": "def mock_handoff():\n handoff = Mock(spec=Handoff)\n handoff.name = \"test_handoff\"\n return handoff\nclass TestEventHandling:\n \"\"\"Test suite for event handling and transformation in RealtimeSession.on_event\"\"\"\n @pytest.mark.asyncio\n async def test_error_event_transformation(self, mock_model, mock_agent):\n \"\"\"Test that error events are properly transformed and queued\"\"\"\n session = RealtimeSession(mock_model, mock_agent, None)", + "detail": "tests.realtime.test_session", + "documentation": {} + }, + { + "label": "TestRealtimeTracingIntegration", + "kind": 6, + "importPath": "tests.realtime.test_tracing", + "description": "tests.realtime.test_tracing", + "peekOfCode": "class TestRealtimeTracingIntegration:\n \"\"\"Test tracing configuration and session.update integration.\"\"\"\n @pytest.fixture\n def model(self):\n \"\"\"Create a fresh model instance for each test.\"\"\"\n return OpenAIRealtimeWebSocketModel()\n @pytest.fixture\n def mock_websocket(self):\n \"\"\"Create a mock websocket connection.\"\"\"\n mock_ws = AsyncMock()", + "detail": "tests.realtime.test_tracing", + "documentation": {} + }, + { + "label": "pytest_ignore_collect", + "kind": 2, + 
"importPath": "tests.voice.conftest", + "description": "tests.voice.conftest", + "peekOfCode": "def pytest_ignore_collect(collection_path, config):\n if sys.version_info[:2] == (3, 9):\n this_dir = os.path.dirname(__file__)\n if str(collection_path).startswith(this_dir):\n return True", + "detail": "tests.voice.conftest", + "documentation": {} + }, + { + "label": "FakeTTS", + "kind": 6, + "importPath": "tests.voice.fake_models", + "description": "tests.voice.fake_models", + "peekOfCode": "class FakeTTS(TTSModel):\n \"\"\"Fakes TTS by just returning string bytes.\"\"\"\n def __init__(self, strategy: Literal[\"default\", \"split_words\"] = \"default\"):\n self.strategy = strategy\n @property\n def model_name(self) -> str:\n return \"fake_tts\"\n async def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]:\n if self.strategy == \"default\":\n yield np.zeros(2, dtype=np.int16).tobytes()", + "detail": "tests.voice.fake_models", + "documentation": {} + }, + { + "label": "FakeSession", + "kind": 6, + "importPath": "tests.voice.fake_models", + "description": "tests.voice.fake_models", + "peekOfCode": "class FakeSession(StreamedTranscriptionSession):\n \"\"\"A fake streamed transcription session that yields preconfigured transcripts.\"\"\"\n def __init__(self):\n self.outputs: list[str] = []\n async def transcribe_turns(self) -> AsyncIterator[str]:\n for t in self.outputs:\n yield t\n async def close(self) -> None:\n return None\nclass FakeSTT(STTModel):", + "detail": "tests.voice.fake_models", + "documentation": {} + }, + { + "label": "FakeSTT", + "kind": 6, + "importPath": "tests.voice.fake_models", + "description": "tests.voice.fake_models", + "peekOfCode": "class FakeSTT(STTModel):\n \"\"\"A fake STT model that either returns a single transcript or yields multiple.\"\"\"\n def __init__(self, outputs: list[str] | None = None):\n self.outputs = outputs or []\n @property\n def model_name(self) -> str:\n return \"fake_stt\"\n async def transcribe(self, _: AudioInput, __: STTModelSettings, ___: bool, ____: bool) -> str:\n return self.outputs.pop(0)\n async def create_session(", + "detail": "tests.voice.fake_models", + "documentation": {} + }, + { + "label": "FakeWorkflow", + "kind": 6, + "importPath": "tests.voice.fake_models", + "description": "tests.voice.fake_models", + "peekOfCode": "class FakeWorkflow(VoiceWorkflowBase):\n \"\"\"A fake workflow that yields preconfigured outputs.\"\"\"\n def __init__(self, outputs: list[list[str]] | None = None):\n self.outputs = outputs or []\n def add_output(self, output: list[str]) -> None:\n self.outputs.append(output)\n def add_multiple_outputs(self, outputs: list[list[str]]) -> None:\n self.outputs.extend(outputs)\n async def run(self, _: str) -> AsyncIterator[str]:\n if not self.outputs:", + "detail": "tests.voice.fake_models", + "documentation": {} + }, + { + "label": "FakeStreamedAudioInput", + "kind": 6, + "importPath": "tests.voice.fake_models", + "description": "tests.voice.fake_models", + "peekOfCode": "class FakeStreamedAudioInput:\n @classmethod\n async def get(cls, count: int) -> StreamedAudioInput:\n input = StreamedAudioInput()\n for _ in range(count):\n await input.add_audio(np.zeros(2, dtype=np.int16))\n return input", + "detail": "tests.voice.fake_models", + "documentation": {} + }, + { + "label": "TestAudioInput", + "kind": 6, + "importPath": "tests.voice.test_input", + "description": "tests.voice.test_input", + "peekOfCode": "class TestAudioInput:\n def test_audio_input_default_params(self):\n # Create a simple sine 
wave\n t = np.linspace(0, 1, DEFAULT_SAMPLE_RATE)\n buffer = np.sin(2 * np.pi * 440 * t).astype(np.float32)\n audio_input = AudioInput(buffer=buffer)\n assert audio_input.frame_rate == DEFAULT_SAMPLE_RATE\n assert audio_input.sample_width == 2\n assert audio_input.channels == 1\n assert np.array_equal(audio_input.buffer, buffer)", + "detail": "tests.voice.test_input", + "documentation": {} + }, + { + "label": "TestStreamedAudioInput", + "kind": 6, + "importPath": "tests.voice.test_input", + "description": "tests.voice.test_input", + "peekOfCode": "class TestStreamedAudioInput:\n @pytest.mark.asyncio\n async def test_streamed_audio_input(self):\n streamed_input = StreamedAudioInput()\n # Create some test audio data\n t = np.linspace(0, 1, DEFAULT_SAMPLE_RATE)\n audio1 = np.sin(2 * np.pi * 440 * t).astype(np.float32)\n audio2 = np.sin(2 * np.pi * 880 * t).astype(np.float32)\n # Add audio to the queue\n await streamed_input.add_audio(audio1)", + "detail": "tests.voice.test_input", + "documentation": {} + }, + { + "label": "test_buffer_to_audio_file_int16", + "kind": 2, + "importPath": "tests.voice.test_input", + "description": "tests.voice.test_input", + "peekOfCode": "def test_buffer_to_audio_file_int16():\n # Create a simple sine wave in int16 format\n t = np.linspace(0, 1, DEFAULT_SAMPLE_RATE)\n buffer = (np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)\n filename, audio_file, content_type = _buffer_to_audio_file(buffer)\n assert filename == \"audio.wav\"\n assert content_type == \"audio/wav\"\n assert isinstance(audio_file, io.BytesIO)\n # Verify the WAV file contents\n with wave.open(audio_file, \"rb\") as wav_file:", + "detail": "tests.voice.test_input", + "documentation": {} + }, + { + "label": "test_buffer_to_audio_file_float32", + "kind": 2, + "importPath": "tests.voice.test_input", + "description": "tests.voice.test_input", + "peekOfCode": "def test_buffer_to_audio_file_float32():\n # Create a simple sine wave in float32 format\n t = np.linspace(0, 1, DEFAULT_SAMPLE_RATE)\n buffer = np.sin(2 * np.pi * 440 * t).astype(np.float32)\n filename, audio_file, content_type = _buffer_to_audio_file(buffer)\n assert filename == \"audio.wav\"\n assert content_type == \"audio/wav\"\n assert isinstance(audio_file, io.BytesIO)\n # Verify the WAV file contents\n with wave.open(audio_file, \"rb\") as wav_file:", + "detail": "tests.voice.test_input", + "documentation": {} + }, + { + "label": "test_buffer_to_audio_file_invalid_dtype", + "kind": 2, + "importPath": "tests.voice.test_input", + "description": "tests.voice.test_input", + "peekOfCode": "def test_buffer_to_audio_file_invalid_dtype():\n # Create a buffer with invalid dtype (float64)\n buffer = np.array([1.0, 2.0, 3.0], dtype=np.float64)\n with pytest.raises(UserError, match=\"Buffer must be a numpy array of int16 or float32\"):\n # Purposely ignore the type error\n _buffer_to_audio_file(buffer) # type: ignore\nclass TestAudioInput:\n def test_audio_input_default_params(self):\n # Create a simple sine wave\n t = np.linspace(0, 1, DEFAULT_SAMPLE_RATE)", + "detail": "tests.voice.test_input", + "documentation": {} + }, + { + "label": "create_mock_websocket", + "kind": 2, + "importPath": "tests.voice.test_openai_stt", + "description": "tests.voice.test_openai_stt", + "peekOfCode": "def create_mock_websocket(messages: list[str]) -> AsyncMock:\n \"\"\"\n Creates a mock websocket (AsyncMock) that will return the provided incoming_messages\n from __aiter__() as if they came from the server.\n \"\"\"\n mock_ws = AsyncMock()\n 
mock_ws.__aenter__.return_value = mock_ws\n # The incoming_messages are strings that we pretend come from the server\n mock_ws.__aiter__.return_value = iter(messages)\n return mock_ws", + "detail": "tests.voice.test_openai_stt", + "documentation": {} + }, + { + "label": "fake_time", + "kind": 2, + "importPath": "tests.voice.test_openai_stt", + "description": "tests.voice.test_openai_stt", + "peekOfCode": "def fake_time(increment: int):\n current = 1000\n while True:\n yield current\n current += increment\n# ===== Tests =====\n@pytest.mark.asyncio\nasync def test_non_json_messages_should_crash():\n \"\"\"This tests that non-JSON messages will raise an exception\"\"\"\n # Setup: mock websockets.connect", + "detail": "tests.voice.test_openai_stt", + "documentation": {} + }, + { + "label": "_FakeStreamResponse", + "kind": 6, + "importPath": "tests.voice.test_openai_tts", + "description": "tests.voice.test_openai_tts", + "peekOfCode": "class _FakeStreamResponse:\n \"\"\"A minimal async context manager to simulate streaming audio bytes.\"\"\"\n def __init__(self, chunks: list[bytes]):\n self._chunks = chunks\n async def __aenter__(self) -> \"_FakeStreamResponse\":\n return self\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n return None\n async def iter_bytes(self, chunk_size: int = 1024):\n for chunk in self._chunks:", + "detail": "tests.voice.test_openai_tts", + "documentation": {} + }, + { + "label": "FakeStreamingModel", + "kind": 6, + "importPath": "tests.voice.test_workflow", + "description": "tests.voice.test_workflow", + "peekOfCode": "class FakeStreamingModel(Model):\n def __init__(self):\n self.turn_outputs: list[list[TResponseOutputItem]] = []\n def set_next_output(self, output: list[TResponseOutputItem]):\n self.turn_outputs.append(output)\n def add_multiple_turn_outputs(self, outputs: list[list[TResponseOutputItem]]):\n self.turn_outputs.extend(outputs)\n def get_next_output(self) -> list[TResponseOutputItem]:\n if not self.turn_outputs:\n return []", + "detail": "tests.voice.test_workflow", + "documentation": {} + }, + { + "label": "setup_span_processor", + "kind": 2, + "importPath": "tests.conftest", + "description": "tests.conftest", + "peekOfCode": "def setup_span_processor():\n set_trace_processors([SPAN_PROCESSOR_TESTING])\n# This fixture will run before each test\n@pytest.fixture(autouse=True)\ndef clear_span_processor():\n SPAN_PROCESSOR_TESTING.force_flush()\n SPAN_PROCESSOR_TESTING.shutdown()\n SPAN_PROCESSOR_TESTING.clear()\n# This fixture will run before each test\n@pytest.fixture(autouse=True)", + "detail": "tests.conftest", + "documentation": {} + }, + { + "label": "clear_span_processor", + "kind": 2, + "importPath": "tests.conftest", + "description": "tests.conftest", + "peekOfCode": "def clear_span_processor():\n SPAN_PROCESSOR_TESTING.force_flush()\n SPAN_PROCESSOR_TESTING.shutdown()\n SPAN_PROCESSOR_TESTING.clear()\n# This fixture will run before each test\n@pytest.fixture(autouse=True)\ndef clear_openai_settings():\n _openai_shared._default_openai_key = None\n _openai_shared._default_openai_client = None\n _openai_shared._use_responses_by_default = True", + "detail": "tests.conftest", + "documentation": {} + }, + { + "label": "clear_openai_settings", + "kind": 2, + "importPath": "tests.conftest", + "description": "tests.conftest", + "peekOfCode": "def clear_openai_settings():\n _openai_shared._default_openai_key = None\n _openai_shared._default_openai_client = None\n _openai_shared._use_responses_by_default = 
True\n@pytest.fixture(autouse=True)\ndef clear_default_runner():\n set_default_agent_runner(None)\n# This fixture will run after all tests end\n@pytest.fixture(autouse=True, scope=\"session\")\ndef shutdown_trace_provider():", + "detail": "tests.conftest", + "documentation": {} + }, + { + "label": "clear_default_runner", + "kind": 2, + "importPath": "tests.conftest", + "description": "tests.conftest", + "peekOfCode": "def clear_default_runner():\n set_default_agent_runner(None)\n# This fixture will run after all tests end\n@pytest.fixture(autouse=True, scope=\"session\")\ndef shutdown_trace_provider():\n yield\n get_trace_provider().shutdown()\n@pytest.fixture(autouse=True)\ndef disable_real_model_clients(monkeypatch, request):\n # If the test is marked to allow the method call, don't override it.", + "detail": "tests.conftest", + "documentation": {} + }, + { + "label": "shutdown_trace_provider", + "kind": 2, + "importPath": "tests.conftest", + "description": "tests.conftest", + "peekOfCode": "def shutdown_trace_provider():\n yield\n get_trace_provider().shutdown()\n@pytest.fixture(autouse=True)\ndef disable_real_model_clients(monkeypatch, request):\n # If the test is marked to allow the method call, don't override it.\n if request.node.get_closest_marker(\"allow_call_model_methods\"):\n return\n def failing_version(*args, **kwargs):\n pytest.fail(\"Real models should not be used in tests!\")", + "detail": "tests.conftest", + "documentation": {} + }, + { + "label": "disable_real_model_clients", + "kind": 2, + "importPath": "tests.conftest", + "description": "tests.conftest", + "peekOfCode": "def disable_real_model_clients(monkeypatch, request):\n # If the test is marked to allow the method call, don't override it.\n if request.node.get_closest_marker(\"allow_call_model_methods\"):\n return\n def failing_version(*args, **kwargs):\n pytest.fail(\"Real models should not be used in tests!\")\n monkeypatch.setattr(OpenAIResponsesModel, \"get_response\", failing_version)\n monkeypatch.setattr(OpenAIResponsesModel, \"stream_response\", failing_version)\n monkeypatch.setattr(OpenAIChatCompletionsModel, \"get_response\", failing_version)\n monkeypatch.setattr(OpenAIChatCompletionsModel, \"stream_response\", failing_version)", + "detail": "tests.conftest", + "documentation": {} + }, + { + "label": "FakeModel", + "kind": 6, + "importPath": "tests.fake_model", + "description": "tests.fake_model", + "peekOfCode": "class FakeModel(Model):\n def __init__(\n self,\n tracing_enabled: bool = False,\n initial_output: list[TResponseOutputItem] | Exception | None = None,\n ):\n if initial_output is None:\n initial_output = []\n self.turn_outputs: list[list[TResponseOutputItem] | Exception] = (\n [initial_output] if initial_output else []", + "detail": "tests.fake_model", + "documentation": {} + }, + { + "label": "get_response_obj", + "kind": 2, + "importPath": "tests.fake_model", + "description": "tests.fake_model", + "peekOfCode": "def get_response_obj(\n output: list[TResponseOutputItem],\n response_id: str | None = None,\n usage: Usage | None = None,\n) -> Response:\n return Response(\n id=response_id or \"123\",\n created_at=123,\n model=\"test_model\",\n object=\"response\",", + "detail": "tests.fake_model", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_agent_config", + "description": "tests.test_agent_config", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_agent_final_output():\n agent = Agent(\n name=\"test\",\n 
output_type=Foo,\n )\n schema = AgentRunner._get_output_schema(agent)\n assert isinstance(schema, AgentOutputSchema)", + "detail": "tests.test_agent_config", + "documentation": {} + }, + { + "label": "AgentHooksForTests", + "kind": 6, + "importPath": "tests.test_agent_hooks", + "description": "tests.test_agent_hooks", + "peekOfCode": "class AgentHooksForTests(AgentHooks):\n def __init__(self):\n self.events: dict[str, int] = defaultdict(int)\n def reset(self):\n self.events.clear()\n async def on_start(self, context: RunContextWrapper[TContext], agent: Agent[TContext]) -> None:\n self.events[\"on_start\"] += 1\n async def on_end(\n self,\n context: RunContextWrapper[TContext],", + "detail": "tests.test_agent_hooks", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_agent_hooks", + "description": "tests.test_agent_hooks", + "peekOfCode": "class Foo(TypedDict):\n a: str\n@pytest.mark.asyncio\nasync def test_structured_output_non_streamed_agent_hooks():\n hooks = AgentHooksForTests()\n model = FakeModel()\n agent_1 = Agent(name=\"test_1\", model=model)\n agent_2 = Agent(name=\"test_2\", model=model)\n agent_3 = Agent(\n name=\"test_3\",", + "detail": "tests.test_agent_hooks", + "documentation": {} + }, + { + "label": "EmptyAgentHooks", + "kind": 6, + "importPath": "tests.test_agent_hooks", + "description": "tests.test_agent_hooks", + "peekOfCode": "class EmptyAgentHooks(AgentHooks):\n pass\n@pytest.mark.asyncio\nasync def test_base_agent_hooks_dont_crash():\n hooks = EmptyAgentHooks()\n model = FakeModel()\n agent_1 = Agent(name=\"test_1\", model=model)\n agent_2 = Agent(name=\"test_2\", model=model)\n agent_3 = Agent(\n name=\"test_3\",", + "detail": "tests.test_agent_hooks", + "documentation": {} + }, + { + "label": "PromptCaptureFakeModel", + "kind": 6, + "importPath": "tests.test_agent_prompt", + "description": "tests.test_agent_prompt", + "peekOfCode": "class PromptCaptureFakeModel(FakeModel):\n \"\"\"Subclass of FakeModel that records the prompt passed to the model.\"\"\"\n def __init__(self):\n super().__init__()\n self.last_prompt = None\n async def get_response(\n self,\n system_instructions,\n input,\n model_settings,", + "detail": "tests.test_agent_prompt", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_agent_runner", + "description": "tests.test_agent_runner", + "peekOfCode": "class Foo(TypedDict):\n bar: str\n@pytest.mark.asyncio\nasync def test_structured_output():\n model = FakeModel()\n agent_1 = Agent(\n name=\"test\",\n model=model,\n tools=[get_function_tool(\"bar\", \"bar_result\")],\n output_type=Foo,", + "detail": "tests.test_agent_runner", + "documentation": {} + }, + { + "label": "remove_new_items", + "kind": 2, + "importPath": "tests.test_agent_runner", + "description": "tests.test_agent_runner", + "peekOfCode": "def remove_new_items(handoff_input_data: HandoffInputData) -> HandoffInputData:\n return HandoffInputData(\n input_history=handoff_input_data.input_history,\n pre_handoff_items=(),\n new_items=(),\n )\n@pytest.mark.asyncio\nasync def test_handoff_filters():\n model = FakeModel()\n agent_1 = Agent(", + "detail": "tests.test_agent_runner", + "documentation": {} + }, + { + "label": "test_tool_one", + "kind": 2, + "importPath": "tests.test_agent_runner", + "description": "tests.test_agent_runner", + "peekOfCode": "def test_tool_one():\n return Foo(bar=\"tool_one_result\")\n@function_tool\ndef test_tool_two():\n return \"tool_two_result\"\n@pytest.mark.asyncio\nasync def 
test_tool_use_behavior_first_output():\n model = FakeModel()\n agent = Agent(\n name=\"test\",", + "detail": "tests.test_agent_runner", + "documentation": {} + }, + { + "label": "test_tool_two", + "kind": 2, + "importPath": "tests.test_agent_runner", + "description": "tests.test_agent_runner", + "peekOfCode": "def test_tool_two():\n return \"tool_two_result\"\n@pytest.mark.asyncio\nasync def test_tool_use_behavior_first_output():\n model = FakeModel()\n agent = Agent(\n name=\"test\",\n model=model,\n tools=[get_function_tool(\"foo\", \"tool_result\"), test_tool_one, test_tool_two],\n tool_use_behavior=\"stop_on_first_tool\",", + "detail": "tests.test_agent_runner", + "documentation": {} + }, + { + "label": "custom_tool_use_behavior", + "kind": 2, + "importPath": "tests.test_agent_runner", + "description": "tests.test_agent_runner", + "peekOfCode": "def custom_tool_use_behavior(\n context: RunContextWrapper[Any], results: list[FunctionToolResult]\n) -> ToolsToFinalOutputResult:\n if \"test_tool_one\" in [result.tool.name for result in results]:\n return ToolsToFinalOutputResult(is_final_output=True, final_output=\"the_final_output\")\n else:\n return ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@pytest.mark.asyncio\nasync def test_tool_use_behavior_custom_function():\n model = FakeModel()", + "detail": "tests.test_agent_runner", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_agent_runner_streamed", + "description": "tests.test_agent_runner_streamed", + "peekOfCode": "class Foo(TypedDict):\n bar: str\n@pytest.mark.asyncio\nasync def test_structured_output():\n model = FakeModel()\n agent_1 = Agent(\n name=\"test\",\n model=model,\n tools=[get_function_tool(\"bar\", \"bar_result\")],\n output_type=Foo,", + "detail": "tests.test_agent_runner_streamed", + "documentation": {} + }, + { + "label": "remove_new_items", + "kind": 2, + "importPath": "tests.test_agent_runner_streamed", + "description": "tests.test_agent_runner_streamed", + "peekOfCode": "def remove_new_items(handoff_input_data: HandoffInputData) -> HandoffInputData:\n return HandoffInputData(\n input_history=handoff_input_data.input_history,\n pre_handoff_items=(),\n new_items=(),\n )\n@pytest.mark.asyncio\nasync def test_handoff_filters():\n model = FakeModel()\n agent_1 = Agent(", + "detail": "tests.test_agent_runner_streamed", + "documentation": {} + }, + { + "label": "LoggingComputer", + "kind": 6, + "importPath": "tests.test_computer_action", + "description": "tests.test_computer_action", + "peekOfCode": "class LoggingComputer(Computer):\n \"\"\"A `Computer` implementation that logs calls to its methods for verification in tests.\"\"\"\n def __init__(self, screenshot_return: str = \"screenshot\"):\n self.calls: list[tuple[str, tuple[Any, ...]]] = []\n self._screenshot_return = screenshot_return\n @property\n def environment(self):\n return \"mac\"\n @property\n def dimensions(self) -> tuple[int, int]:", + "detail": "tests.test_computer_action", + "documentation": {} + }, + { + "label": "LoggingAsyncComputer", + "kind": 6, + "importPath": "tests.test_computer_action", + "description": "tests.test_computer_action", + "peekOfCode": "class LoggingAsyncComputer(AsyncComputer):\n \"\"\"An `AsyncComputer` implementation that logs calls to its methods for verification.\"\"\"\n def __init__(self, screenshot_return: str = \"async_screenshot\"):\n self.calls: list[tuple[str, tuple[Any, ...]]] = []\n self._screenshot_return = screenshot_return\n @property\n def 
environment(self):\n return \"mac\"\n @property\n def dimensions(self) -> tuple[int, int]:", + "detail": "tests.test_computer_action", + "documentation": {} + }, + { + "label": "LoggingRunHooks", + "kind": 6, + "importPath": "tests.test_computer_action", + "description": "tests.test_computer_action", + "peekOfCode": "class LoggingRunHooks(RunHooks[Any]):\n \"\"\"Capture on_tool_start and on_tool_end invocations.\"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.started: list[tuple[Agent[Any], Any]] = []\n self.ended: list[tuple[Agent[Any], Any, str]] = []\n async def on_tool_start(\n self, context: RunContextWrapper[Any], agent: Agent[Any], action: Action,\n ) -> None:\n self.started.append((agent, action.computer_tool))", + "detail": "tests.test_computer_action", + "documentation": {} + }, + { + "label": "LoggingAgentHooks", + "kind": 6, + "importPath": "tests.test_computer_action", + "description": "tests.test_computer_action", + "peekOfCode": "class LoggingAgentHooks(AgentHooks[Any]):\n \"\"\"Minimal override to capture agent's tool hook invocations.\"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.started: list[tuple[Agent[Any], Any]] = []\n self.ended: list[tuple[Agent[Any], Any, str]] = []\n async def on_tool_start(\n self, context: RunContextWrapper[Any], agent: Agent[Any], action: Action,\n ) -> None:\n self.started.append((agent, action.computer_tool))", + "detail": "tests.test_computer_action", + "documentation": {} + }, + { + "label": "test_cc_no_default_key_errors", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_cc_no_default_key_errors(monkeypatch):\n monkeypatch.delenv(\"OPENAI_API_KEY\", raising=False)\n with pytest.raises(openai.OpenAIError):\n OpenAIProvider(use_responses=False).get_model(\"gpt-4\")\ndef test_cc_set_default_openai_key():\n set_default_openai_key(\"test_key\")\n chat_model = OpenAIProvider(use_responses=False).get_model(\"gpt-4\")\n assert chat_model._client.api_key == \"test_key\" # type: ignore\ndef test_cc_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "test_cc_set_default_openai_key", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_cc_set_default_openai_key():\n set_default_openai_key(\"test_key\")\n chat_model = OpenAIProvider(use_responses=False).get_model(\"gpt-4\")\n assert chat_model._client.api_key == \"test_key\" # type: ignore\ndef test_cc_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")\n set_default_openai_client(client)\n chat_model = OpenAIProvider(use_responses=False).get_model(\"gpt-4\")\n assert chat_model._client.api_key == \"test_key\" # type: ignore\ndef test_resp_no_default_key_errors(monkeypatch):", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "test_cc_set_default_openai_client", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_cc_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")\n set_default_openai_client(client)\n chat_model = OpenAIProvider(use_responses=False).get_model(\"gpt-4\")\n assert chat_model._client.api_key == \"test_key\" # type: ignore\ndef test_resp_no_default_key_errors(monkeypatch):\n monkeypatch.delenv(\"OPENAI_API_KEY\", raising=False)\n assert os.getenv(\"OPENAI_API_KEY\") is None\n with 
pytest.raises(openai.OpenAIError):\n OpenAIProvider(use_responses=True).get_model(\"gpt-4\")", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "test_resp_no_default_key_errors", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_resp_no_default_key_errors(monkeypatch):\n monkeypatch.delenv(\"OPENAI_API_KEY\", raising=False)\n assert os.getenv(\"OPENAI_API_KEY\") is None\n with pytest.raises(openai.OpenAIError):\n OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\ndef test_resp_set_default_openai_key():\n set_default_openai_key(\"test_key\")\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_resp_set_default_openai_client():", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "test_resp_set_default_openai_key", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_resp_set_default_openai_key():\n set_default_openai_key(\"test_key\")\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_resp_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")\n set_default_openai_client(client)\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_set_default_openai_api():", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "test_resp_set_default_openai_client", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_resp_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")\n set_default_openai_client(client)\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_set_default_openai_api():\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIResponsesModel), (\n \"Default should be responses\"\n )\n set_default_openai_api(\"chat_completions\")", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "test_set_default_openai_api", + "kind": 2, + "importPath": "tests.test_config", + "description": "tests.test_config", + "peekOfCode": "def test_set_default_openai_api():\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIResponsesModel), (\n \"Default should be responses\"\n )\n set_default_openai_api(\"chat_completions\")\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIChatCompletionsModel), (\n \"Should be chat completions model\"\n )\n set_default_openai_api(\"responses\")\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIResponsesModel), (", + "detail": "tests.test_config", + "documentation": {} + }, + { + "label": "Bar", + "kind": 6, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "class Bar:\n def func_bar(self, a: int, b: float) -> str:\n \"\"\"\n This is func_bar.\n Args:\n a: The first argument.\n b: The second argument.\n Returns:\n A result\n \"\"\"", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "func_foo_google", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def func_foo_google(a: int, 
b: float) -> str:\n \"\"\"\n This is func_foo.\n Args:\n a: The first argument.\n b: The second argument.\n Returns:\n A result\n \"\"\"\n return \"ok\"", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "func_foo_numpy", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def func_foo_numpy(a: int, b: float) -> str:\n \"\"\"\n This is func_foo.\n Parameters\n ----------\n a: int\n The first argument.\n b: float\n The second argument.\n Returns", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "func_foo_sphinx", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def func_foo_sphinx(a: int, b: float) -> str:\n \"\"\"\n This is func_foo.\n :param a: The first argument.\n :param b: The second argument.\n :return: A result\n \"\"\"\n return \"ok\"\nclass Bar:\n def func_bar(self, a: int, b: float) -> str:", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "test_functions_are_ok", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def test_functions_are_ok():\n func_foo_google(1, 2.0)\n func_foo_numpy(1, 2.0)\n func_foo_sphinx(1, 2.0)\n Bar().func_bar(1, 2.0)\n Bar.func_baz(1, 2.0)\ndef test_auto_detection() -> None:\n doc = generate_func_documentation(func_foo_google)\n assert doc.name == \"func_foo_google\"\n assert doc.description == \"This is func_foo.\"", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "test_auto_detection", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def test_auto_detection() -> None:\n doc = generate_func_documentation(func_foo_google)\n assert doc.name == \"func_foo_google\"\n assert doc.description == \"This is func_foo.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}\n doc = generate_func_documentation(func_foo_numpy)\n assert doc.name == \"func_foo_numpy\"\n assert doc.description == \"This is func_foo.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}\n doc = generate_func_documentation(func_foo_sphinx)", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "test_instance_method", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def test_instance_method() -> None:\n bar = Bar()\n doc = generate_func_documentation(bar.func_bar)\n assert doc.name == \"func_bar\"\n assert doc.description == \"This is func_bar.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}\ndef test_classmethod() -> None:\n doc = generate_func_documentation(Bar.func_baz)\n assert doc.name == \"func_baz\"\n assert doc.description == \"This is func_baz.\"", + "detail": "tests.test_doc_parsing", + "documentation": {} + }, + { + "label": "test_classmethod", + "kind": 2, + "importPath": "tests.test_doc_parsing", + "description": "tests.test_doc_parsing", + "peekOfCode": "def test_classmethod() -> None:\n doc = generate_func_documentation(Bar.func_baz)\n assert doc.name == \"func_baz\"\n assert doc.description == \"This is func_baz.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}", + "detail": "tests.test_doc_parsing", + 
"documentation": {} + }, + { + "label": "fake_agent", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def fake_agent():\n return Agent(\n name=\"fake_agent\",\n )\ndef _get_message_input_item(content: str) -> TResponseInputItem:\n return {\n \"role\": \"assistant\",\n \"content\": content,\n }\ndef _get_function_result_input_item(content: str) -> TResponseInputItem:", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_empty_data", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_empty_data():\n handoff_input_data = HandoffInputData(input_history=(), pre_handoff_items=(), new_items=())\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_str_historyonly():\n handoff_input_data = HandoffInputData(input_history=\"Hello\", pre_handoff_items=(), new_items=())\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_str_history_and_list():\n handoff_input_data = HandoffInputData(", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_str_historyonly", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_str_historyonly():\n handoff_input_data = HandoffInputData(input_history=\"Hello\", pre_handoff_items=(), new_items=())\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_str_history_and_list():\n handoff_input_data = HandoffInputData(\n input_history=\"Hello\",\n pre_handoff_items=(),\n new_items=(_get_message_output_run_item(\"Hello\"),),\n )", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_str_history_and_list", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_str_history_and_list():\n handoff_input_data = HandoffInputData(\n input_history=\"Hello\",\n pre_handoff_items=(),\n new_items=(_get_message_output_run_item(\"Hello\"),),\n )\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_list_history_and_list():\n handoff_input_data = HandoffInputData(", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_list_history_and_list", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_list_history_and_list():\n handoff_input_data = HandoffInputData(\n input_history=(_get_message_input_item(\"Hello\"),),\n pre_handoff_items=(_get_message_output_run_item(\"123\"),),\n new_items=(_get_message_output_run_item(\"World\"),),\n )\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_removes_tools_from_history():\n handoff_input_data = HandoffInputData(", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_removes_tools_from_history", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_removes_tools_from_history():\n handoff_input_data = HandoffInputData(\n input_history=(\n _get_message_input_item(\"Hello1\"),\n 
_get_function_result_input_item(\"World\"),\n _get_message_input_item(\"Hello2\"),\n ),\n pre_handoff_items=(\n _get_tool_output_run_item(\"abc\"),\n _get_message_output_run_item(\"123\"),", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_removes_tools_from_new_items", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_removes_tools_from_new_items():\n handoff_input_data = HandoffInputData(\n input_history=(),\n pre_handoff_items=(),\n new_items=(\n _get_message_output_run_item(\"Hello\"),\n _get_tool_output_run_item(\"World\"),\n ),\n )\n filtered_data = remove_all_tools(handoff_input_data)", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_removes_tools_from_new_items_and_history", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_removes_tools_from_new_items_and_history():\n handoff_input_data = HandoffInputData(\n input_history=(\n _get_message_input_item(\"Hello1\"),\n _get_function_result_input_item(\"World\"),\n _get_message_input_item(\"Hello2\"),\n ),\n pre_handoff_items=(\n _get_message_output_run_item(\"123\"),\n _get_tool_output_run_item(\"456\"),", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "test_removes_handoffs_from_history", + "kind": 2, + "importPath": "tests.test_extension_filters", + "description": "tests.test_extension_filters", + "peekOfCode": "def test_removes_handoffs_from_history():\n handoff_input_data = HandoffInputData(\n input_history=(\n _get_message_input_item(\"Hello1\"),\n _get_handoff_input_item(\"World\"),\n ),\n pre_handoff_items=(\n _get_message_output_run_item(\"Hello\"),\n _get_tool_output_run_item(\"World\"),\n _get_handoff_output_run_item(\"World\"),", + "detail": "tests.test_extension_filters", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "class Foo(TypedDict):\n a: int\n b: str\nclass InnerModel(BaseModel):\n a: int\n b: str\nclass OuterModel(BaseModel):\n inner: InnerModel\n foo: Foo\ndef complex_args_function(model: OuterModel) -> str:", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "InnerModel", + "kind": 6, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "class InnerModel(BaseModel):\n a: int\n b: str\nclass OuterModel(BaseModel):\n inner: InnerModel\n foo: Foo\ndef complex_args_function(model: OuterModel) -> str:\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}\"\ndef test_nested_data_function():\n func_schema = function_schema(complex_args_function)", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "OuterModel", + "kind": 6, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "class OuterModel(BaseModel):\n inner: InnerModel\n foo: Foo\ndef complex_args_function(model: OuterModel) -> str:\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}\"\ndef test_nested_data_function():\n func_schema = function_schema(complex_args_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_function_args\"\n 
# Valid input", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "MyClass", + "kind": 6, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "class MyClass:\n def foo(self, a: int, b: int = 5):\n return a + b\n def foo_ctx(self, ctx: RunContextWrapper[str], a: int, b: int = 5):\n return a + b\n @classmethod\n def bar(cls, a: int, b: int = 5):\n return a + b\n @classmethod\n def bar_ctx(cls, ctx: RunContextWrapper[str], a: int, b: int = 5):", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "MyEnum", + "kind": 6, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "class MyEnum(str, Enum):\n FOO = \"foo\"\n BAR = \"bar\"\n BAZ = \"baz\"\ndef enum_and_literal_function(a: MyEnum, b: Literal[\"a\", \"b\", \"c\"]) -> str:\n return f\"{a.value} {b}\"\ndef test_enum_and_literal_function():\n func_schema = function_schema(enum_and_literal_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"enum_and_literal_function_args\"", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "no_args_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def no_args_function():\n \"\"\"This function has no args.\"\"\"\n return \"ok\"\ndef test_no_args_function():\n func_schema = function_schema(no_args_function)\n assert func_schema.params_json_schema.get(\"title\") == \"no_args_function_args\"\n assert func_schema.description == \"This function has no args.\"\n assert not func_schema.takes_context\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_no_args_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_no_args_function():\n func_schema = function_schema(no_args_function)\n assert func_schema.params_json_schema.get(\"title\") == \"no_args_function_args\"\n assert func_schema.description == \"This function has no args.\"\n assert not func_schema.takes_context\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = no_args_function(*args, **kwargs_dict)\n assert result == \"ok\"\ndef no_args_function_with_context(ctx: RunContextWrapper[str]):", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "no_args_function_with_context", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def no_args_function_with_context(ctx: RunContextWrapper[str]):\n return \"ok\"\ndef test_no_args_function_with_context() -> None:\n func_schema = function_schema(no_args_function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = no_args_function_with_context(context, *args, **kwargs_dict)\n assert result == \"ok\"", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_no_args_function_with_context", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": 
"tests.test_function_schema", + "peekOfCode": "def test_no_args_function_with_context() -> None:\n func_schema = function_schema(no_args_function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = no_args_function_with_context(context, *args, **kwargs_dict)\n assert result == \"ok\"\ndef simple_function(a: int, b: int = 5):\n \"\"\"", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "simple_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def simple_function(a: int, b: int = 5):\n \"\"\"\n Args:\n a: The first argument\n b: The second argument\n Returns:\n The sum of a and b\n \"\"\"\n return a + b\ndef test_simple_function():", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_simple_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_simple_function():\n \"\"\"Test a function that has simple typed parameters and defaults.\"\"\"\n func_schema = function_schema(simple_function)\n # Check that the JSON schema is a dictionary with title, type, etc.\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"simple_function_args\"\n assert (\n func_schema.params_json_schema.get(\"properties\", {}).get(\"a\").get(\"description\")\n == \"The first argument\"\n )", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "varargs_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def varargs_function(x: int, *numbers: float, flag: bool = False, **kwargs: Any):\n return x, numbers, flag, kwargs\ndef test_varargs_function():\n \"\"\"Test a function that uses *args and **kwargs.\"\"\"\n func_schema = function_schema(varargs_function, strict_json_schema=False)\n # Check JSON schema structure\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"varargs_function_args\"\n # Valid input including *args in 'numbers' and **kwargs in 'kwargs'\n valid_input = {", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_varargs_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_varargs_function():\n \"\"\"Test a function that uses *args and **kwargs.\"\"\"\n func_schema = function_schema(varargs_function, strict_json_schema=False)\n # Check JSON schema structure\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"varargs_function_args\"\n # Valid input including *args in 'numbers' and **kwargs in 'kwargs'\n valid_input = {\n \"x\": 10,\n \"numbers\": [1.1, 2.2, 3.3],", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "complex_args_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def complex_args_function(model: OuterModel) -> str:\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}\"\ndef test_nested_data_function():\n func_schema = 
function_schema(complex_args_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_function_args\"\n # Valid input\n model = OuterModel(inner=InnerModel(a=1, b=\"hello\"), foo=Foo(a=2, b=\"world\"))\n valid_input = {\n \"model\": model.model_dump(),", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_nested_data_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_nested_data_function():\n func_schema = function_schema(complex_args_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_function_args\"\n # Valid input\n model = OuterModel(inner=InnerModel(a=1, b=\"hello\"), foo=Foo(a=2, b=\"world\"))\n valid_input = {\n \"model\": model.model_dump(),\n }\n parsed = func_schema.params_pydantic_model(**valid_input)", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "complex_args_and_docs_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def complex_args_and_docs_function(model: OuterModel, some_flag: int = 0) -> str:\n \"\"\"\n This function takes a model and a flag, and returns a string.\n Args:\n model: A model with an inner and foo field\n some_flag: An optional flag with a default of 0\n Returns:\n A string with the values of the model and flag\n \"\"\"\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}, {some_flag or 0}\"", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_complex_args_and_docs_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_complex_args_and_docs_function():\n func_schema = function_schema(complex_args_and_docs_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_and_docs_function_args\"\n # Check docstring is parsed correctly\n properties = func_schema.params_json_schema.get(\"properties\", {})\n assert properties.get(\"model\").get(\"description\") == \"A model with an inner and foo field\"\n assert properties.get(\"some_flag\").get(\"description\") == \"An optional flag with a default of 0\"\n # Valid input\n model = OuterModel(inner=InnerModel(a=1, b=\"hello\"), foo=Foo(a=2, b=\"world\"))", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "function_with_context", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def function_with_context(ctx: RunContextWrapper[str], a: int, b: int = 5):\n return a + b\ndef test_function_with_context():\n func_schema = function_schema(function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n input = {\"a\": 1, \"b\": 2}\n parsed = func_schema.params_pydantic_model(**input)\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = function_with_context(context, *args, **kwargs_dict)", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_context", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + 
"peekOfCode": "def test_function_with_context():\n func_schema = function_schema(function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n input = {\"a\": 1, \"b\": 2}\n parsed = func_schema.params_pydantic_model(**input)\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = function_with_context(context, *args, **kwargs_dict)\n assert result == 3\nclass MyClass:", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_class_based_functions", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_class_based_functions():\n context = RunContextWrapper(context=\"test\")\n # Instance method\n instance = MyClass()\n func_schema = function_schema(instance.foo)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"foo_args\"\n input = {\"a\": 1, \"b\": 2}\n parsed = func_schema.params_pydantic_model(**input)\n args, kwargs_dict = func_schema.to_call_args(parsed)", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "enum_and_literal_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def enum_and_literal_function(a: MyEnum, b: Literal[\"a\", \"b\", \"c\"]) -> str:\n return f\"{a.value} {b}\"\ndef test_enum_and_literal_function():\n func_schema = function_schema(enum_and_literal_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"enum_and_literal_function_args\"\n # Check that the enum values are included in the JSON schema\n assert func_schema.params_json_schema.get(\"$defs\", {}).get(\"MyEnum\", {}).get(\"enum\") == [\n \"foo\",\n \"bar\",", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_enum_and_literal_function", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_enum_and_literal_function():\n func_schema = function_schema(enum_and_literal_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"enum_and_literal_function_args\"\n # Check that the enum values are included in the JSON schema\n assert func_schema.params_json_schema.get(\"$defs\", {}).get(\"MyEnum\", {}).get(\"enum\") == [\n \"foo\",\n \"bar\",\n \"baz\",\n ]", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_run_context_in_non_first_position_raises_value_error", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_run_context_in_non_first_position_raises_value_error():\n # When a parameter (after the first) is annotated as RunContextWrapper,\n # function_schema() should raise a UserError.\n def func(a: int, context: RunContextWrapper) -> None:\n pass\n with pytest.raises(UserError):\n function_schema(func, use_docstring_info=False)\ndef test_var_positional_tuple_annotation():\n # When a function has a var-positional parameter annotated with a tuple type,\n # function_schema() should convert it into a field with type List[].", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_var_positional_tuple_annotation", + "kind": 2, + "importPath": 
"tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_var_positional_tuple_annotation():\n # When a function has a var-positional parameter annotated with a tuple type,\n # function_schema() should convert it into a field with type List[].\n def func(*args: tuple[int, ...]) -> int:\n total = 0\n for arg in args:\n total += sum(arg)\n return total\n fs = function_schema(func, use_docstring_info=False)\n properties = fs.params_json_schema.get(\"properties\", {})", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_var_keyword_dict_annotation", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_var_keyword_dict_annotation():\n # Case 3:\n # When a function has a var-keyword parameter annotated with a dict type,\n # function_schema() should convert it into a field with type Dict[, ].\n def func(**kwargs: dict[str, int]):\n return kwargs\n fs = function_schema(func, use_docstring_info=False, strict_json_schema=False)\n properties = fs.params_json_schema.get(\"properties\", {})\n # The name of the field is \"kwargs\", and it's a JSON object i.e. a dict.\n assert properties.get(\"kwargs\").get(\"type\") == \"object\"", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_schema_with_mapping_raises_strict_mode_error", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_schema_with_mapping_raises_strict_mode_error():\n \"\"\"A mapping type is not allowed in strict mode. Same for dicts. Ensure we raise a UserError.\"\"\"\n def func_with_mapping(test_one: Mapping[str, int]) -> str:\n return \"foo\"\n with pytest.raises(UserError):\n function_schema(func_with_mapping)\ndef test_name_override_without_docstring() -> None:\n \"\"\"name_override should be used even when not parsing docstrings.\"\"\"\n def foo(x: int) -> int:\n return x", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_name_override_without_docstring", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_name_override_without_docstring() -> None:\n \"\"\"name_override should be used even when not parsing docstrings.\"\"\"\n def foo(x: int) -> int:\n return x\n fs = function_schema(foo, use_docstring_info=False, name_override=\"custom\")\n assert fs.name == \"custom\"\n assert fs.params_json_schema.get(\"title\") == \"custom_args\"\ndef test_function_with_field_required_constraints():\n \"\"\"Test function with required Field parameter that has constraints.\"\"\"\n def func_with_field_constraints(my_number: int = Field(..., gt=10, le=100)) -> int:", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_field_required_constraints", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_function_with_field_required_constraints():\n \"\"\"Test function with required Field parameter that has constraints.\"\"\"\n def func_with_field_constraints(my_number: int = Field(..., gt=10, le=100)) -> int:\n return my_number * 2\n fs = function_schema(func_with_field_constraints, use_docstring_info=False)\n # Check that the schema includes the constraints\n properties = fs.params_json_schema.get(\"properties\", {})\n 
my_number_schema = properties.get(\"my_number\", {})\n assert my_number_schema.get(\"type\") == \"integer\"\n assert my_number_schema.get(\"exclusiveMinimum\") == 10 # gt=10", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_field_optional_with_default", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_function_with_field_optional_with_default():\n \"\"\"Test function with optional Field parameter that has default and constraints.\"\"\"\n def func_with_optional_field(\n required_param: str,\n optional_param: float = Field(default=5.0, ge=0.0),\n ) -> str:\n return f\"{required_param}: {optional_param}\"\n fs = function_schema(func_with_optional_field, use_docstring_info=False)\n # Check that the schema includes the constraints and description\n properties = fs.params_json_schema.get(\"properties\", {})", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_field_description_merge", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_function_with_field_description_merge():\n \"\"\"Test that Field descriptions are merged with docstring descriptions.\"\"\"\n def func_with_field_and_docstring(\n param_with_field_desc: int = Field(..., description=\"Field description\"),\n param_with_both: str = Field(default=\"hello\", description=\"Field description\"),\n ) -> str:\n \"\"\"\n Function with both field and docstring descriptions.\n Args:\n param_with_field_desc: Docstring description", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "func_with_field_desc_only", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def func_with_field_desc_only(\n param_with_field_desc: int = Field(..., description=\"Field description only\"),\n param_without_desc: str = Field(default=\"hello\"),\n) -> str:\n return f\"{param_with_field_desc}: {param_without_desc}\"\ndef test_function_with_field_description_only():\n \"\"\"Test that Field descriptions are used when no docstring info.\"\"\"\n fs = function_schema(func_with_field_desc_only)\n # Check that field description is used when no docstring\n properties = fs.params_json_schema.get(\"properties\", {})", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_field_description_only", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_function_with_field_description_only():\n \"\"\"Test that Field descriptions are used when no docstring info.\"\"\"\n fs = function_schema(func_with_field_desc_only)\n # Check that field description is used when no docstring\n properties = fs.params_json_schema.get(\"properties\", {})\n param1_schema = properties.get(\"param_with_field_desc\", {})\n param2_schema = properties.get(\"param_without_desc\", {})\n assert param1_schema.get(\"description\") == \"Field description only\"\n assert param2_schema.get(\"description\") is None\ndef test_function_with_field_string_constraints():", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_field_string_constraints", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": 
"def test_function_with_field_string_constraints():\n \"\"\"Test function with Field parameter that has string-specific constraints.\"\"\"\n def func_with_string_field(\n name: str = Field(..., min_length=3, max_length=20, pattern=r\"^[A-Za-z]+$\"),\n ) -> str:\n return f\"Hello, {name}!\"\n fs = function_schema(func_with_string_field, use_docstring_info=False)\n # Check that the schema includes string constraints\n properties = fs.params_json_schema.get(\"properties\", {})\n name_schema = properties.get(\"name\", {})", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "test_function_with_field_multiple_constraints", + "kind": 2, + "importPath": "tests.test_function_schema", + "description": "tests.test_function_schema", + "peekOfCode": "def test_function_with_field_multiple_constraints():\n \"\"\"Test function with multiple Field parameters having different constraint types.\"\"\"\n def func_with_multiple_field_constraints(\n score: int = Field(..., ge=0, le=100, description=\"Score from 0 to 100\"),\n name: str = Field(default=\"Unknown\", min_length=1, max_length=50),\n factor: float = Field(default=1.0, gt=0.0, description=\"Positive multiplier\"),\n ) -> str:\n final_score = score * factor\n return f\"{name} scored {final_score}\"\n fs = function_schema(func_with_multiple_field_constraints, use_docstring_info=False)", + "detail": "tests.test_function_schema", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "class Foo(BaseModel):\n a: int\n b: int = 5\nclass Bar(TypedDict):\n x: str\n y: int\ndef complex_args_function(foo: Foo, bar: Bar, baz: str = \"hello\"):\n return f\"{foo.a + foo.b} {bar['x']}{bar['y']} {baz}\"\n@pytest.mark.asyncio\nasync def test_complex_args_function():", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "Bar", + "kind": 6, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "class Bar(TypedDict):\n x: str\n y: int\ndef complex_args_function(foo: Foo, bar: Bar, baz: str = \"hello\"):\n return f\"{foo.a + foo.b} {bar['x']}{bar['y']} {baz}\"\n@pytest.mark.asyncio\nasync def test_complex_args_function():\n tool = function_tool(complex_args_function, failure_error_function=None)\n assert tool.name == \"complex_args_function\"\n valid_json = json.dumps(", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "BoolCtx", + "kind": 6, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "class BoolCtx(BaseModel):\n enable_tools: bool\n@pytest.mark.asyncio\nasync def test_is_enabled_bool_and_callable():\n @function_tool(is_enabled=False)\n def disabled_tool():\n return \"nope\"\n async def cond_enabled(ctx: RunContextWrapper[BoolCtx], agent: AgentBase) -> bool:\n return ctx.context.enable_tools\n @function_tool(is_enabled=cond_enabled)", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "argless_function", + "kind": 2, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "def argless_function() -> str:\n return \"ok\"\n@pytest.mark.asyncio\nasync def test_argless_function():\n tool = function_tool(argless_function)\n assert tool.name == \"argless_function\"\n result = await tool.on_invoke_tool(\n ToolContext(context=None, tool_name=tool.name, tool_call_id=\"1\"), \"\"\n )\n 
assert result == \"ok\"", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "argless_with_context", + "kind": 2, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "def argless_with_context(ctx: ToolContext[str]) -> str:\n return \"ok\"\n@pytest.mark.asyncio\nasync def test_argless_with_context():\n tool = function_tool(argless_with_context)\n assert tool.name == \"argless_with_context\"\n result = await tool.on_invoke_tool(ToolContext(None, tool_name=tool.name, tool_call_id=\"1\"), \"\")\n assert result == \"ok\"\n # Extra JSON should not raise an error\n result = await tool.on_invoke_tool(", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "simple_function", + "kind": 2, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "def simple_function(a: int, b: int = 5):\n return a + b\n@pytest.mark.asyncio\nasync def test_simple_function():\n tool = function_tool(simple_function, failure_error_function=None)\n assert tool.name == \"simple_function\"\n result = await tool.on_invoke_tool(\n ToolContext(None, tool_name=tool.name, tool_call_id=\"1\"), '{\"a\": 1}'\n )\n assert result == 6", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "complex_args_function", + "kind": 2, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "def complex_args_function(foo: Foo, bar: Bar, baz: str = \"hello\"):\n return f\"{foo.a + foo.b} {bar['x']}{bar['y']} {baz}\"\n@pytest.mark.asyncio\nasync def test_complex_args_function():\n tool = function_tool(complex_args_function, failure_error_function=None)\n assert tool.name == \"complex_args_function\"\n valid_json = json.dumps(\n {\n \"foo\": Foo(a=1).model_dump(),\n \"bar\": Bar(x=\"hello\", y=10),", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "test_function_config_overrides", + "kind": 2, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "def test_function_config_overrides():\n tool = function_tool(simple_function, name_override=\"custom_name\")\n assert tool.name == \"custom_name\"\n tool = function_tool(simple_function, description_override=\"custom description\")\n assert tool.description == \"custom description\"\n tool = function_tool(\n simple_function,\n name_override=\"custom_name\",\n description_override=\"custom description\",\n )", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "test_func_schema_is_strict", + "kind": 2, + "importPath": "tests.test_function_tool", + "description": "tests.test_function_tool", + "peekOfCode": "def test_func_schema_is_strict():\n tool = function_tool(simple_function)\n assert tool.strict_json_schema, \"Should be strict by default\"\n assert (\n \"additionalProperties\" in tool.params_json_schema\n and not tool.params_json_schema[\"additionalProperties\"]\n )\n tool = function_tool(complex_args_function)\n assert tool.strict_json_schema, \"Should be strict by default\"\n assert (", + "detail": "tests.test_function_tool", + "documentation": {} + }, + { + "label": "DummyContext", + "kind": 6, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "class DummyContext:\n def __init__(self):\n self.data = \"something\"\ndef ctx_wrapper() -> ToolContext[DummyContext]:\n return 
ToolContext(context=DummyContext(), tool_name=\"dummy\", tool_call_id=\"1\")\n@function_tool\ndef sync_no_context_no_args() -> str:\n return \"test_1\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_no_args_invocation():", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "ctx_wrapper", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def ctx_wrapper() -> ToolContext[DummyContext]:\n return ToolContext(context=DummyContext(), tool_name=\"dummy\", tool_call_id=\"1\")\n@function_tool\ndef sync_no_context_no_args() -> str:\n return \"test_1\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_no_args_invocation():\n tool = sync_no_context_no_args\n output = await tool.on_invoke_tool(ctx_wrapper(), \"\")\n assert output == \"test_1\"", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "sync_no_context_no_args", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def sync_no_context_no_args() -> str:\n return \"test_1\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_no_args_invocation():\n tool = sync_no_context_no_args\n output = await tool.on_invoke_tool(ctx_wrapper(), \"\")\n assert output == \"test_1\"\n@function_tool\ndef sync_no_context_with_args(a: int, b: int) -> int:\n return a + b", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "sync_no_context_with_args", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def sync_no_context_with_args(a: int, b: int) -> int:\n return a + b\n@pytest.mark.asyncio\nasync def test_sync_no_context_with_args_invocation():\n tool = sync_no_context_with_args\n input_data = {\"a\": 5, \"b\": 7}\n output = await tool.on_invoke_tool(ctx_wrapper(), json.dumps(input_data))\n assert int(output) == 12\n@function_tool\ndef sync_with_context(ctx: ToolContext[DummyContext], name: str) -> str:", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "sync_with_context", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def sync_with_context(ctx: ToolContext[DummyContext], name: str) -> str:\n return f\"{name}_{ctx.context.data}\"\n@pytest.mark.asyncio\nasync def test_sync_with_context_invocation():\n tool = sync_with_context\n input_data = {\"name\": \"Alice\"}\n output = await tool.on_invoke_tool(ctx_wrapper(), json.dumps(input_data))\n assert output == \"Alice_something\"\n@function_tool\nasync def async_no_context(a: int, b: int) -> int:", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "sync_no_context_override", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def sync_no_context_override() -> str:\n return \"override_result\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_override_invocation():\n tool = sync_no_context_override\n assert tool.name == \"my_custom_tool\"\n assert tool.description == \"custom desc\"\n output = await tool.on_invoke_tool(ctx_wrapper(), \"\")\n assert output == \"override_result\"\n@function_tool(failure_error_function=None)", + "detail": 
"tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "will_fail_on_bad_json", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def will_fail_on_bad_json(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_error_on_invalid_json():\n tool = will_fail_on_bad_json\n # Passing an invalid JSON string\n with pytest.raises(Exception) as exc_info:\n await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert \"Invalid JSON input for tool\" in str(exc_info.value)\ndef sync_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "sync_error_handler", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def sync_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:\n return f\"error_{error.__class__.__name__}\"\n@function_tool(failure_error_function=sync_error_handler)\ndef will_not_fail_on_bad_json(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json():\n tool = will_not_fail_on_bad_json\n # Passing an invalid JSON string\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "will_not_fail_on_bad_json", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def will_not_fail_on_bad_json(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json():\n tool = will_not_fail_on_bad_json\n # Passing an invalid JSON string\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert result == \"error_ModelBehaviorError\"\ndef async_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:\n return f\"error_{error.__class__.__name__}\"", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "async_error_handler", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def async_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:\n return f\"error_{error.__class__.__name__}\"\n@function_tool(failure_error_function=sync_error_handler)\ndef will_not_fail_on_bad_json_async(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json_async():\n tool = will_not_fail_on_bad_json_async\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert result == \"error_ModelBehaviorError\"", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "will_not_fail_on_bad_json_async", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def will_not_fail_on_bad_json_async(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json_async():\n tool = will_not_fail_on_bad_json_async\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert result == \"error_ModelBehaviorError\"\n@function_tool(strict_mode=False)\ndef 
optional_param_function(a: int, b: Optional[int] = None) -> str:\n if b is None:", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "optional_param_function", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def optional_param_function(a: int, b: Optional[int] = None) -> str:\n if b is None:\n return f\"{a}_no_b\"\n return f\"{a}_{b}\"\n@pytest.mark.asyncio\nasync def test_non_strict_mode_function():\n tool = optional_param_function\n assert tool.strict_json_schema is False, \"strict_json_schema should be False\"\n assert tool.params_json_schema.get(\"required\") == [\"a\"], \"required should only be a\"\n input_data = {\"a\": 5}", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "all_optional_params_function", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def all_optional_params_function(\n x: int = 42,\n y: str = \"hello\",\n z: Optional[int] = None,\n) -> str:\n if z is None:\n return f\"{x}_{y}_no_z\"\n return f\"{x}_{y}_{z}\"\n@pytest.mark.asyncio\nasync def test_all_optional_params_function():", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "get_weather", + "kind": 2, + "importPath": "tests.test_function_tool_decorator", + "description": "tests.test_function_tool_decorator", + "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather for a given city.\n Args:\n city: The city to get the weather for.\n \"\"\"\n return f\"The weather in {city} is sunny.\"\n@pytest.mark.asyncio\nasync def test_extract_descriptions_from_docstring():\n \"\"\"Ensure that we extract function and param descriptions from docstrings.\"\"\"\n tool = get_weather", + "detail": "tests.test_function_tool_decorator", + "documentation": {} + }, + { + "label": "RunHooksForTests", + "kind": 6, + "importPath": "tests.test_global_hooks", + "description": "tests.test_global_hooks", + "peekOfCode": "class RunHooksForTests(RunHooks):\n def __init__(self):\n self.events: dict[str, int] = defaultdict(int)\n def reset(self):\n self.events.clear()\n async def on_agent_start(\n self, context: RunContextWrapper[TContext], agent: Agent[TContext]\n ) -> None:\n self.events[\"on_agent_start\"] += 1\n async def on_agent_end(", + "detail": "tests.test_global_hooks", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_global_hooks", + "description": "tests.test_global_hooks", + "peekOfCode": "class Foo(TypedDict):\n a: str\n@pytest.mark.asyncio\nasync def test_structured_output_non_streamed_agent_hooks():\n hooks = RunHooksForTests()\n model = FakeModel()\n agent_1 = Agent(name=\"test_1\", model=model)\n agent_2 = Agent(name=\"test_2\", model=model)\n agent_3 = Agent(\n name=\"test_3\",", + "detail": "tests.test_global_hooks", + "documentation": {} + }, + { + "label": "get_sync_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def get_sync_guardrail(triggers: bool, output_info: Any | None = None):\n def sync_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n ):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return sync_guardrail\n@pytest.mark.asyncio", + "detail": "tests.test_guardrails", + 
"documentation": {} + }, + { + "label": "get_async_input_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def get_async_input_guardrail(triggers: bool, output_info: Any | None = None):\n async def async_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n ):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return async_guardrail\n@pytest.mark.asyncio", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "get_sync_output_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def get_sync_output_guardrail(triggers: bool, output_info: Any | None = None):\n def sync_guardrail(context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return sync_guardrail\n@pytest.mark.asyncio\nasync def test_sync_output_guardrail():\n guardrail = OutputGuardrail(guardrail_function=get_sync_output_guardrail(triggers=False))", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "get_async_output_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def get_async_output_guardrail(triggers: bool, output_info: Any | None = None):\n async def async_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n ):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return async_guardrail\n@pytest.mark.asyncio", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "decorated_input_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def decorated_input_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_1\",\n tripwire_triggered=False,\n )\n@input_guardrail(name=\"Custom name\")\ndef decorated_named_input_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "decorated_named_input_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def decorated_named_input_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_2\",\n tripwire_triggered=False,\n )\n@pytest.mark.asyncio\nasync def test_input_guardrail_decorators():\n guardrail = decorated_input_guardrail", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "decorated_output_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def decorated_output_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_3\",\n tripwire_triggered=False,\n )\n@output_guardrail(name=\"Custom name\")\ndef decorated_named_output_guardrail(\n context: RunContextWrapper[Any], agent: 
Agent[Any], agent_output: Any", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "decorated_named_output_guardrail", + "kind": 2, + "importPath": "tests.test_guardrails", + "description": "tests.test_guardrails", + "peekOfCode": "def decorated_named_output_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_4\",\n tripwire_triggered=False,\n )\n@pytest.mark.asyncio\nasync def test_output_guardrail_decorators():\n guardrail = decorated_output_guardrail", + "detail": "tests.test_guardrails", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_handoff_input_type():\n async def _on_handoff(ctx: RunContextWrapper[Any], input: Foo):\n pass\n agent = Agent(name=\"test\")\n obj = handoff(agent, input_type=Foo, on_handoff=_on_handoff)\n for key, value in Foo.model_json_schema().items():\n assert obj.input_json_schema[key] == value", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "message_item", + "kind": 2, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "def message_item(content: str, agent: Agent[Any]) -> MessageOutputItem:\n return MessageOutputItem(\n agent=agent,\n raw_item=ResponseOutputMessage(\n id=\"123\",\n status=\"completed\",\n role=\"assistant\",\n type=\"message\",\n content=[ResponseOutputText(text=content, type=\"output_text\", annotations=[])],\n ),", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "get_len", + "kind": 2, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "def get_len(data: HandoffInputData) -> int:\n input_len = len(data.input_history) if isinstance(data.input_history, tuple) else 1\n pre_handoff_len = len(data.pre_handoff_items)\n new_items_len = len(data.new_items)\n return input_len + pre_handoff_len + new_items_len\n@pytest.mark.asyncio\nasync def test_single_handoff_setup():\n agent_1 = Agent(name=\"test_1\")\n agent_2 = Agent(name=\"test_2\", handoffs=[agent_1])\n assert not agent_1.handoffs", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "test_handoff_input_data", + "kind": 2, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "def test_handoff_input_data():\n agent = Agent(name=\"test\")\n data = HandoffInputData(\n input_history=\"\",\n pre_handoff_items=(),\n new_items=(),\n )\n assert get_len(data) == 1\n data = HandoffInputData(\n input_history=({\"role\": \"user\", \"content\": \"foo\"},),", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "test_handoff_input_schema_is_strict", + "kind": 2, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "def test_handoff_input_schema_is_strict():\n agent = Agent(name=\"test\")\n obj = handoff(agent, input_type=Foo, on_handoff=lambda ctx, input: None)\n for key, value in Foo.model_json_schema().items():\n assert obj.input_json_schema[key] == value\n assert obj.strict_json_schema, \"Input schema should be strict\"\n assert (\n \"additionalProperties\" in obj.input_json_schema\n and not obj.input_json_schema[\"additionalProperties\"]\n ), \"Input 
schema should be strict and have additionalProperties=False\"", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "test_get_transfer_message_is_valid_json", + "kind": 2, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "def test_get_transfer_message_is_valid_json() -> None:\n agent = Agent(name=\"foo\")\n obj = handoff(agent)\n transfer = obj.get_transfer_message(agent)\n assert json.loads(transfer) == {\"assistant\": agent.name}\ndef test_handoff_is_enabled_bool():\n \"\"\"Test that handoff respects is_enabled boolean parameter.\"\"\"\n agent = Agent(name=\"test\")\n # Test enabled handoff (default)\n handoff_enabled = handoff(agent)", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "test_handoff_is_enabled_bool", + "kind": 2, + "importPath": "tests.test_handoff_tool", + "description": "tests.test_handoff_tool", + "peekOfCode": "def test_handoff_is_enabled_bool():\n \"\"\"Test that handoff respects is_enabled boolean parameter.\"\"\"\n agent = Agent(name=\"test\")\n # Test enabled handoff (default)\n handoff_enabled = handoff(agent)\n assert handoff_enabled.is_enabled is True\n # Test explicitly enabled handoff\n handoff_explicit_enabled = handoff(agent, is_enabled=True)\n assert handoff_explicit_enabled.is_enabled is True\n # Test disabled handoff", + "detail": "tests.test_handoff_tool", + "documentation": {} + }, + { + "label": "make_message", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def make_message(\n content_items: list[ResponseOutputText | ResponseOutputRefusal],\n) -> ResponseOutputMessage:\n \"\"\"\n Helper to construct a ResponseOutputMessage with a single batch of content\n items, using a fixed id/status.\n \"\"\"\n return ResponseOutputMessage(\n id=\"msg123\",\n content=content_items,", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_extract_last_content_of_text_message", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_extract_last_content_of_text_message() -> None:\n # Build a message containing two text segments.\n content1 = ResponseOutputText(annotations=[], text=\"Hello \", type=\"output_text\")\n content2 = ResponseOutputText(annotations=[], text=\"world!\", type=\"output_text\")\n message = make_message([content1, content2])\n # Helpers should yield the last segment's text.\n assert ItemHelpers.extract_last_content(message) == \"world!\"\ndef test_extract_last_content_of_refusal_message() -> None:\n # Build a message whose last content entry is a refusal.\n content1 = ResponseOutputText(annotations=[], text=\"Before refusal\", type=\"output_text\")", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_extract_last_content_of_refusal_message", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_extract_last_content_of_refusal_message() -> None:\n # Build a message whose last content entry is a refusal.\n content1 = ResponseOutputText(annotations=[], text=\"Before refusal\", type=\"output_text\")\n refusal = ResponseOutputRefusal(refusal=\"I cannot do that\", type=\"refusal\")\n message = make_message([content1, refusal])\n # Helpers should extract the refusal string when last content is a refusal.\n assert ItemHelpers.extract_last_content(message) == 
\"I cannot do that\"\ndef test_extract_last_content_non_message_returns_empty() -> None:\n # Construct some other type of output item, e.g. a tool call, to verify non-message returns \"\".\n tool_call = ResponseFunctionToolCall(", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_extract_last_content_non_message_returns_empty", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_extract_last_content_non_message_returns_empty() -> None:\n # Construct some other type of output item, e.g. a tool call, to verify non-message returns \"\".\n tool_call = ResponseFunctionToolCall(\n id=\"tool123\",\n arguments=\"{}\",\n call_id=\"call123\",\n name=\"func\",\n type=\"function_call\",\n )\n assert ItemHelpers.extract_last_content(tool_call) == \"\"", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_extract_last_text_returns_text_only", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_extract_last_text_returns_text_only() -> None:\n # A message whose last segment is text yields the text.\n first_text = ResponseOutputText(annotations=[], text=\"part1\", type=\"output_text\")\n second_text = ResponseOutputText(annotations=[], text=\"part2\", type=\"output_text\")\n message = make_message([first_text, second_text])\n assert ItemHelpers.extract_last_text(message) == \"part2\"\n # Whereas when last content is a refusal, extract_last_text returns None.\n message2 = make_message([first_text, ResponseOutputRefusal(refusal=\"no\", type=\"refusal\")])\n assert ItemHelpers.extract_last_text(message2) is None\ndef test_input_to_new_input_list_from_string() -> None:", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_input_to_new_input_list_from_string", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_input_to_new_input_list_from_string() -> None:\n result = ItemHelpers.input_to_new_input_list(\"hi\")\n # Should wrap the string into a list with a single dict containing content and user role.\n assert isinstance(result, list)\n assert result == [{\"content\": \"hi\", \"role\": \"user\"}]\ndef test_input_to_new_input_list_deep_copies_lists() -> None:\n # Given a list of message dictionaries, ensure the returned list is a deep copy.\n original: list[TResponseInputItem] = [{\"content\": \"abc\", \"role\": \"developer\"}]\n new_list = ItemHelpers.input_to_new_input_list(original)\n assert new_list == original", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_input_to_new_input_list_deep_copies_lists", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_input_to_new_input_list_deep_copies_lists() -> None:\n # Given a list of message dictionaries, ensure the returned list is a deep copy.\n original: list[TResponseInputItem] = [{\"content\": \"abc\", \"role\": \"developer\"}]\n new_list = ItemHelpers.input_to_new_input_list(original)\n assert new_list == original\n # Mutating the returned list should not mutate the original.\n new_list.pop()\n assert \"content\" in original[0] and original[0].get(\"content\") == \"abc\"\ndef test_text_message_output_concatenates_text_segments() -> None:\n # Build a message with both text and refusal segments, only text segments 
are concatenated.", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_text_message_output_concatenates_text_segments", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_text_message_output_concatenates_text_segments() -> None:\n # Build a message with both text and refusal segments, only text segments are concatenated.\n pieces: list[ResponseOutputText | ResponseOutputRefusal] = []\n pieces.append(ResponseOutputText(annotations=[], text=\"a\", type=\"output_text\"))\n pieces.append(ResponseOutputRefusal(refusal=\"denied\", type=\"refusal\"))\n pieces.append(ResponseOutputText(annotations=[], text=\"b\", type=\"output_text\"))\n message = make_message(pieces)\n # Wrap into MessageOutputItem to feed into text_message_output.\n item = MessageOutputItem(agent=Agent(name=\"test\"), raw_item=message)\n assert ItemHelpers.text_message_output(item) == \"ab\"", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_text_message_outputs_across_list_of_runitems", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_text_message_outputs_across_list_of_runitems() -> None:\n \"\"\"\n Compose several RunItem instances, including a non-message run item, and ensure\n that only MessageOutputItem instances contribute any text. The non-message\n (ReasoningItem) should be ignored by Helpers.text_message_outputs.\n \"\"\"\n message1 = make_message([ResponseOutputText(annotations=[], text=\"foo\", type=\"output_text\")])\n message2 = make_message([ResponseOutputText(annotations=[], text=\"bar\", type=\"output_text\")])\n item1: RunItem = MessageOutputItem(agent=Agent(name=\"test\"), raw_item=message1)\n item2: RunItem = MessageOutputItem(agent=Agent(name=\"test\"), raw_item=message2)", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_tool_call_output_item_constructs_function_call_output_dict", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_tool_call_output_item_constructs_function_call_output_dict():\n # Build a simple ResponseFunctionToolCall.\n call = ResponseFunctionToolCall(\n id=\"call-abc\",\n arguments='{\"x\": 1}',\n call_id=\"call-abc\",\n name=\"do_something\",\n type=\"function_call\",\n )\n payload = ItemHelpers.tool_call_output_item(call, \"result-string\")", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_to_input_items_for_message", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_to_input_items_for_message() -> None:\n \"\"\"An output message should convert into an input dict matching the message's own structure.\"\"\"\n content = ResponseOutputText(annotations=[], text=\"hello world\", type=\"output_text\")\n message = ResponseOutputMessage(\n id=\"m1\", content=[content], role=\"assistant\", status=\"completed\", type=\"message\"\n )\n resp = ModelResponse(output=[message], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n # The dict should contain exactly the primitive values of the message", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_to_input_items_for_function_call", + "kind": 2, + "importPath": 
"tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_to_input_items_for_function_call() -> None:\n \"\"\"A function tool call output should produce the same dict as a function tool call input.\"\"\"\n tool_call = ResponseFunctionToolCall(\n id=\"f1\", arguments=\"{}\", call_id=\"c1\", name=\"func\", type=\"function_call\"\n )\n resp = ModelResponse(output=[tool_call], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n expected: ResponseFunctionToolCallParam = {\n \"id\": \"f1\",", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_to_input_items_for_file_search_call", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_to_input_items_for_file_search_call() -> None:\n \"\"\"A file search tool call output should produce the same dict as a file search input.\"\"\"\n fs_call = ResponseFileSearchToolCall(\n id=\"fs1\", queries=[\"query\"], status=\"completed\", type=\"file_search_call\"\n )\n resp = ModelResponse(output=[fs_call], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n expected: ResponseFileSearchToolCallParam = {\n \"id\": \"fs1\",", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_to_input_items_for_web_search_call", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_to_input_items_for_web_search_call() -> None:\n \"\"\"A web search tool call output should produce the same dict as a web search input.\"\"\"\n ws_call = ResponseFunctionWebSearch(\n id=\"w1\",\n action=ActionSearch(type=\"search\", query=\"query\"),\n status=\"completed\",\n type=\"web_search_call\",\n )\n resp = ModelResponse(output=[ws_call], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_to_input_items_for_computer_call_click", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_to_input_items_for_computer_call_click() -> None:\n \"\"\"A computer call output should yield a dict whose shape matches the computer call input.\"\"\"\n action = ActionScreenshot(type=\"screenshot\")\n comp_call = ResponseComputerToolCall(\n id=\"comp1\",\n action=action,\n type=\"computer_call\",\n call_id=\"comp1\",\n pending_safety_checks=[],\n status=\"completed\",", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + { + "label": "test_to_input_items_for_reasoning", + "kind": 2, + "importPath": "tests.test_items_helpers", + "description": "tests.test_items_helpers", + "peekOfCode": "def test_to_input_items_for_reasoning() -> None:\n \"\"\"A reasoning output should produce the same dict as a reasoning input item.\"\"\"\n rc = Summary(text=\"why\", type=\"summary_text\")\n reasoning = ResponseReasoningItem(id=\"rid1\", summary=[rc], type=\"reasoning\")\n resp = ModelResponse(output=[reasoning], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n converted_dict = input_items[0]\n expected: ResponseReasoningItemParam = {\n \"id\": \"rid1\",", + "detail": "tests.test_items_helpers", + "documentation": {} + }, + 
{ + "label": "Foo", + "kind": 6, + "importPath": "tests.test_max_turns", + "description": "tests.test_max_turns", + "peekOfCode": "class Foo(TypedDict):\n a: str\n@pytest.mark.asyncio\nasync def test_structured_output_non_streamed_max_turns():\n model = FakeModel()\n agent = Agent(\n name=\"test_1\",\n model=model,\n output_type=Foo,\n tools=[get_function_tool(\"tool_1\", \"result\")],", + "detail": "tests.test_max_turns", + "documentation": {} + }, + { + "label": "test_store_param", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions", + "description": "tests.test_openai_chatcompletions", + "peekOfCode": "def test_store_param():\n \"\"\"Should default to True for OpenAI API calls, and False otherwise.\"\"\"\n model_settings = ModelSettings()\n client = AsyncOpenAI()\n assert ChatCmplHelpers.get_store_param(client, model_settings) is True, (\n \"Should default to True for OpenAI API calls\"\n )\n model_settings = ModelSettings(store=False)\n assert ChatCmplHelpers.get_store_param(client, model_settings) is False, (\n \"Should respect explicitly set store=False\"", + "detail": "tests.test_openai_chatcompletions", + "documentation": {} + }, + { + "label": "TestObject", + "kind": 6, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "class TestObject:\n pass\ndef test_unknown_object_errors():\n \"\"\"\n Test that unknown objects are converted correctly.\n \"\"\"\n with pytest.raises(UserError, match=\"Unhandled item type or structure\"):\n # Purposely ignore the type error\n Converter.items_to_messages([TestObject()]) # type: ignore\ndef test_assistant_messages_in_history():", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_message_to_output_items_with_text_only", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_message_to_output_items_with_text_only():\n \"\"\"\n Make sure a simple ChatCompletionMessage with string content is converted\n into a single ResponseOutputMessage containing one ResponseOutputText.\n \"\"\"\n msg = ChatCompletionMessage(role=\"assistant\", content=\"Hello\")\n items = Converter.message_to_output_items(msg)\n # Expect exactly one output item (the message)\n assert len(items) == 1\n message_item = cast(ResponseOutputMessage, items[0])", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_message_to_output_items_with_refusal", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_message_to_output_items_with_refusal():\n \"\"\"\n Make sure a message with a refusal string produces a ResponseOutputMessage\n with a ResponseOutputRefusal content part.\n \"\"\"\n msg = ChatCompletionMessage(role=\"assistant\", refusal=\"I'm sorry\")\n items = Converter.message_to_output_items(msg)\n assert len(items) == 1\n message_item = cast(ResponseOutputMessage, items[0])\n assert len(message_item.content) == 1", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_message_to_output_items_with_tool_call", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def 
test_message_to_output_items_with_tool_call():\n \"\"\"\n If the ChatCompletionMessage contains one or more tool_calls, they should\n be reflected as separate `ResponseFunctionToolCall` items appended after\n the message item.\n \"\"\"\n tool_call = ChatCompletionMessageToolCall(\n id=\"tool1\",\n type=\"function\",\n function=Function(name=\"myfn\", arguments='{\"x\":1}'),", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_items_to_messages_with_string_user_content", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_items_to_messages_with_string_user_content():\n \"\"\"\n A simple string as the items argument should be converted into a user\n message param dict with the same content.\n \"\"\"\n result = Converter.items_to_messages(\"Ask me anything\")\n assert isinstance(result, list)\n assert len(result) == 1\n msg = result[0]\n assert msg[\"role\"] == \"user\"", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_items_to_messages_with_easy_input_message", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_items_to_messages_with_easy_input_message():\n \"\"\"\n Given an easy input message dict (just role/content), the converter should\n produce the appropriate ChatCompletionMessageParam with the same content.\n \"\"\"\n items: list[TResponseInputItem] = [\n {\n \"role\": \"user\",\n \"content\": \"How are you?\",\n }", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_items_to_messages_with_output_message_and_function_call", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_items_to_messages_with_output_message_and_function_call():\n \"\"\"\n Given a sequence of one ResponseOutputMessageParam followed by a\n ResponseFunctionToolCallParam, the converter should produce a single\n ChatCompletionAssistantMessageParam that includes both the assistant's\n textual content and a populated `tool_calls` reflecting the function call.\n \"\"\"\n # Construct output message param dict with two content parts.\n output_text: ResponseOutputText = ResponseOutputText(\n text=\"Part 1\",", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_convert_tool_choice_handles_standard_and_named_options", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_convert_tool_choice_handles_standard_and_named_options() -> None:\n \"\"\"\n The `Converter.convert_tool_choice` method should return NOT_GIVEN\n if no choice is provided, pass through values like \"auto\", \"required\",\n or \"none\" unchanged, and translate any other string into a function\n selection dict.\n \"\"\"\n assert Converter.convert_tool_choice(None).__class__.__name__ == \"NotGiven\"\n assert Converter.convert_tool_choice(\"auto\") == \"auto\"\n assert Converter.convert_tool_choice(\"required\") == \"required\"", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": 
"test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas() -> None:\n \"\"\"\n The `Converter.convert_response_format` method should return NOT_GIVEN\n when no output schema is provided or if the output schema indicates\n plain text. For structured output schemas, it should return a dict\n with type `json_schema` and include the generated JSON schema and\n strict flag from the provided `AgentOutputSchema`.\n \"\"\"\n # when output is plain text (schema None or output_type str), do not include response_format\n assert Converter.convert_response_format(None).__class__.__name__ == \"NotGiven\"", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_items_to_messages_with_function_output_item", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_items_to_messages_with_function_output_item():\n \"\"\"\n A function call output item should be converted into a tool role message\n dict with the appropriate tool_call_id and content.\n \"\"\"\n func_output_item: FunctionCallOutput = {\n \"type\": \"function_call_output\",\n \"call_id\": \"somecall\",\n \"output\": '{\"foo\": \"bar\"}',\n }", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_extract_all_and_text_content_for_strings_and_lists", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_extract_all_and_text_content_for_strings_and_lists():\n \"\"\"\n The converter provides helpers for extracting user-supplied message content\n either as a simple string or as a list of `input_text` dictionaries.\n When passed a bare string, both `extract_all_content` and\n `extract_text_content` should return the string unchanged.\n When passed a list of input dictionaries, `extract_all_content` should\n produce a list of `ChatCompletionContentPart` dicts, and `extract_text_content`\n should filter to only the textual parts.\n \"\"\"", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_items_to_messages_handles_system_and_developer_roles", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_items_to_messages_handles_system_and_developer_roles():\n \"\"\"\n Roles other than `user` (e.g. 
`system` and `developer`) need to be\n converted appropriately whether provided as simple dicts or as full\n `message` typed dicts.\n \"\"\"\n sys_items: list[TResponseInputItem] = [{\"role\": \"system\", \"content\": \"setup\"}]\n sys_msgs = Converter.items_to_messages(sys_items)\n assert len(sys_msgs) == 1\n assert sys_msgs[0][\"role\"] == \"system\"", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_maybe_input_message_allows_message_typed_dict", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_maybe_input_message_allows_message_typed_dict():\n \"\"\"\n The `Converter.maybe_input_message` should recognize a dict with\n \"type\": \"message\" and a supported role as an input message. Ensure\n that such dicts are passed through by `items_to_messages`.\n \"\"\"\n # Construct a dict with the proper required keys for a ResponseInputParam.Message\n message_dict: TResponseInputItem = {\n \"type\": \"message\",\n \"role\": \"user\",", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_tool_call_conversion", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_tool_call_conversion():\n \"\"\"\n Test that tool calls are converted correctly.\n \"\"\"\n function_call = ResponseFunctionToolCallParam(\n id=\"tool1\",\n call_id=\"abc\",\n name=\"math\",\n arguments=\"{}\",\n type=\"function_call\",", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_input_message_with_all_roles", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_input_message_with_all_roles(role: str):\n \"\"\"\n The `Converter.maybe_input_message` should recognize a dict with\n \"type\": \"message\" and a supported role as an input message. 
Ensure\n that such dicts are passed through by `items_to_messages`.\n \"\"\"\n # Construct a dict with the proper required keys for a ResponseInputParam.Message\n casted_role = cast(Literal[\"user\", \"system\", \"developer\"], role)\n message_dict: TResponseInputItem = {\n \"type\": \"message\",", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_item_reference_errors", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_item_reference_errors():\n \"\"\"\n Test that item references are converted correctly.\n \"\"\"\n with pytest.raises(UserError):\n Converter.items_to_messages(\n [\n {\n \"type\": \"item_reference\",\n \"id\": \"item1\",", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_unknown_object_errors", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_unknown_object_errors():\n \"\"\"\n Test that unknown objects are converted correctly.\n \"\"\"\n with pytest.raises(UserError, match=\"Unhandled item type or structure\"):\n # Purposely ignore the type error\n Converter.items_to_messages([TestObject()]) # type: ignore\ndef test_assistant_messages_in_history():\n \"\"\"\n Test that assistant messages are added to the history.", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_assistant_messages_in_history", + "kind": 2, + "importPath": "tests.test_openai_chatcompletions_converter", + "description": "tests.test_openai_chatcompletions_converter", + "peekOfCode": "def test_assistant_messages_in_history():\n \"\"\"\n Test that assistant messages are added to the history.\n \"\"\"\n messages = Converter.items_to_messages(\n [\n {\n \"role\": \"user\",\n \"content\": \"Hello\",\n },", + "detail": "tests.test_openai_chatcompletions_converter", + "documentation": {} + }, + { + "label": "test_convert_tool_choice_standard_values", + "kind": 2, + "importPath": "tests.test_openai_responses_converter", + "description": "tests.test_openai_responses_converter", + "peekOfCode": "def test_convert_tool_choice_standard_values():\n \"\"\"\n Make sure that the standard tool_choice values map to themselves or\n to \"auto\"/\"required\"/\"none\" as appropriate, and that special string\n values map to the appropriate dicts.\n \"\"\"\n assert Converter.convert_tool_choice(None) is NOT_GIVEN\n assert Converter.convert_tool_choice(\"auto\") == \"auto\"\n assert Converter.convert_tool_choice(\"required\") == \"required\"\n assert Converter.convert_tool_choice(\"none\") == \"none\"", + "detail": "tests.test_openai_responses_converter", + "documentation": {} + }, + { + "label": "test_get_response_format_plain_text_and_json_schema", + "kind": 2, + "importPath": "tests.test_openai_responses_converter", + "description": "tests.test_openai_responses_converter", + "peekOfCode": "def test_get_response_format_plain_text_and_json_schema():\n \"\"\"\n For plain text output (default, or output type of `str`), the converter\n should return NOT_GIVEN, indicating no special response format constraint.\n If an output schema is provided for a structured type, the converter\n should return a `format` dict with the schema and strictness. 
The exact\n JSON schema depends on the output type; we just assert that required\n keys are present and that we get back the original schema.\n \"\"\"\n # Default output (None) should be considered plain text.", + "detail": "tests.test_openai_responses_converter", + "documentation": {} + }, + { + "label": "test_convert_tools_basic_types_and_includes", + "kind": 2, + "importPath": "tests.test_openai_responses_converter", + "description": "tests.test_openai_responses_converter", + "peekOfCode": "def test_convert_tools_basic_types_and_includes():\n \"\"\"\n Construct a variety of tool types and make sure `convert_tools` returns\n a matching list of tool param dicts and the expected includes. Also\n check that only a single computer tool is allowed.\n \"\"\"\n # Simple function tool\n tool_fn = function_tool(lambda a: \"x\", name_override=\"fn\")\n # File search tool with include_search_results set\n file_tool = FileSearchTool(", + "detail": "tests.test_openai_responses_converter", + "documentation": {} + }, + { + "label": "test_convert_tools_includes_handoffs", + "kind": 2, + "importPath": "tests.test_openai_responses_converter", + "description": "tests.test_openai_responses_converter", + "peekOfCode": "def test_convert_tools_includes_handoffs():\n \"\"\"\n When handoff objects are included, `convert_tools` should append their\n tool param dicts after tools and include appropriate descriptions.\n \"\"\"\n agent = Agent(name=\"support\", handoff_description=\"Handles support\")\n handoff_obj = handoff(agent)\n converted = Converter.convert_tools(tools=[], handoffs=[handoff_obj])\n assert isinstance(converted.tools, list)\n assert len(converted.tools) == 1", + "detail": "tests.test_openai_responses_converter", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "class Foo(BaseModel):\n bar: str\ndef test_structured_output_pydantic():\n agent = Agent(name=\"test\", output_type=Foo)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Foo, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"Pydantic objects should not be wrapped\"\n for key, value in Foo.model_json_schema().items():", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "Bar", + "kind": 6, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "class Bar(TypedDict):\n bar: str\ndef test_structured_output_typed_dict():\n agent = Agent(name=\"test\", output_type=Bar)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Bar, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"TypedDicts should not be wrapped\"\n json_str = json.dumps(Bar(bar=\"baz\"))", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "CustomOutputSchema", + "kind": 6, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "class CustomOutputSchema(AgentOutputSchemaBase):\n def is_plain_text(self) -> bool:\n return False\n def name(self) -> str:\n return \"FooBarBaz\"\n def 
json_schema(self) -> dict[str, Any]:\n return _CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA\n def is_strict_json_schema(self) -> bool:\n return False\n def validate_json(self, json_str: str) -> Any:", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_plain_text_output", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_plain_text_output():\n agent = Agent(name=\"test\")\n output_schema = AgentRunner._get_output_schema(agent)\n assert not output_schema, \"Shouldn't have an output tool config without an output type\"\n agent = Agent(name=\"test\", output_type=str)\n assert not output_schema, \"Shouldn't have an output tool config with str output type\"\nclass Foo(BaseModel):\n bar: str\ndef test_structured_output_pydantic():\n agent = Agent(name=\"test\", output_type=Foo)", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_structured_output_pydantic", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_structured_output_pydantic():\n agent = Agent(name=\"test\", output_type=Foo)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Foo, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"Pydantic objects should not be wrapped\"\n for key, value in Foo.model_json_schema().items():\n assert output_schema.json_schema()[key] == value\n json_str = Foo(bar=\"baz\").model_dump_json()", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_structured_output_typed_dict", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_structured_output_typed_dict():\n agent = Agent(name=\"test\", output_type=Bar)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Bar, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"TypedDicts should not be wrapped\"\n json_str = json.dumps(Bar(bar=\"baz\"))\n validated = output_schema.validate_json(json_str)\n assert validated == Bar(bar=\"baz\")", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_structured_output_list", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_structured_output_list():\n agent = Agent(name=\"test\", output_type=list[str])\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == list[str], \"Should have the correct output type\"\n assert output_schema._is_wrapped, \"Lists should be wrapped\"\n # This is testing implementation details, but it's useful to make sure this doesn't break\n json_str = json.dumps({_WRAPPER_DICT_KEY: [\"foo\", \"bar\"]})\n validated = output_schema.validate_json(json_str)", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_bad_json_raises_error", + "kind": 2, + 
"importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_bad_json_raises_error(mocker):\n agent = Agent(name=\"test\", output_type=Foo)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n with pytest.raises(ModelBehaviorError):\n output_schema.validate_json(\"not valid json\")\n agent = Agent(name=\"test\", output_type=list[str])\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n mock_validate_json = mocker.patch.object(_json, \"validate_json\")", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_plain_text_obj_doesnt_produce_schema", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_plain_text_obj_doesnt_produce_schema():\n output_wrapper = AgentOutputSchema(output_type=str)\n with pytest.raises(UserError):\n output_wrapper.json_schema()\ndef test_structured_output_is_strict():\n output_wrapper = AgentOutputSchema(output_type=Foo)\n assert output_wrapper.is_strict_json_schema()\n for key, value in Foo.model_json_schema().items():\n assert output_wrapper.json_schema()[key] == value\n assert (", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_structured_output_is_strict", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_structured_output_is_strict():\n output_wrapper = AgentOutputSchema(output_type=Foo)\n assert output_wrapper.is_strict_json_schema()\n for key, value in Foo.model_json_schema().items():\n assert output_wrapper.json_schema()[key] == value\n assert (\n \"additionalProperties\" in output_wrapper.json_schema()\n and not output_wrapper.json_schema()[\"additionalProperties\"]\n )\ndef test_setting_strict_false_works():", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_setting_strict_false_works", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_setting_strict_false_works():\n output_wrapper = AgentOutputSchema(output_type=Foo, strict_json_schema=False)\n assert not output_wrapper.is_strict_json_schema()\n assert output_wrapper.json_schema() == Foo.model_json_schema()\n assert output_wrapper.json_schema() == Foo.model_json_schema()\n_CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n },", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "test_custom_output_schema", + "kind": 2, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "def test_custom_output_schema():\n custom_output_schema = CustomOutputSchema()\n agent = Agent(name=\"test\", output_type=custom_output_schema)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, CustomOutputSchema)\n assert output_schema.json_schema() == _CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA\n assert not output_schema.is_strict_json_schema()\n assert not output_schema.is_plain_text()\n json_str = json.dumps({\"foo\": \"bar\"})", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": 
"_CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA", + "kind": 5, + "importPath": "tests.test_output_tool", + "description": "tests.test_output_tool", + "peekOfCode": "_CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n },\n \"required\": [\"foo\"],\n}\nclass CustomOutputSchema(AgentOutputSchemaBase):\n def is_plain_text(self) -> bool:\n return False", + "detail": "tests.test_output_tool", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_pretty_print", + "description": "tests.test_pretty_print", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_pretty_run_result_structured_output():\n model = FakeModel()\n model.set_next_output(\n [\n get_text_message(\"Test\"),\n get_final_output_message(Foo(bar=\"Hi there\").model_dump_json()),\n ]", + "detail": "tests.test_pretty_print", + "documentation": {} + }, + { + "label": "create_content_delta", + "kind": 2, + "importPath": "tests.test_reasoning_content", + "description": "tests.test_reasoning_content", + "peekOfCode": "def create_content_delta(content: str) -> dict[str, Any]:\n \"\"\"Create a delta dictionary with regular content\"\"\"\n return {\"content\": content, \"role\": None, \"function_call\": None, \"tool_calls\": None}\ndef create_reasoning_delta(content: str) -> dict[str, Any]:\n \"\"\"Create a delta dictionary with reasoning content. The Only difference is reasoning_content\"\"\"\n return {\n \"content\": None,\n \"role\": None,\n \"function_call\": None,\n \"tool_calls\": None,", + "detail": "tests.test_reasoning_content", + "documentation": {} + }, + { + "label": "create_reasoning_delta", + "kind": 2, + "importPath": "tests.test_reasoning_content", + "description": "tests.test_reasoning_content", + "peekOfCode": "def create_reasoning_delta(content: str) -> dict[str, Any]:\n \"\"\"Create a delta dictionary with reasoning content. 
The Only difference is reasoning_content\"\"\"\n return {\n \"content\": None,\n \"role\": None,\n \"function_call\": None,\n \"tool_calls\": None,\n \"reasoning_content\": content,\n }\ndef create_chunk(delta: dict[str, Any], include_usage: bool = False) -> ChatCompletionChunk:", + "detail": "tests.test_reasoning_content", + "documentation": {} + }, + { + "label": "create_chunk", + "kind": 2, + "importPath": "tests.test_reasoning_content", + "description": "tests.test_reasoning_content", + "peekOfCode": "def create_chunk(delta: dict[str, Any], include_usage: bool = False) -> ChatCompletionChunk:\n \"\"\"Create a ChatCompletionChunk with the given delta\"\"\"\n # Create a ChoiceDelta object from the dictionary\n delta_obj = ChoiceDelta(\n content=delta.get(\"content\"),\n role=delta.get(\"role\"),\n function_call=delta.get(\"function_call\"),\n tool_calls=delta.get(\"tool_calls\"),\n )\n # Add reasoning_content attribute dynamically if present in the delta", + "detail": "tests.test_reasoning_content", + "documentation": {} + }, + { + "label": "get_text_input_item", + "kind": 2, + "importPath": "tests.test_responses", + "description": "tests.test_responses", + "peekOfCode": "def get_text_input_item(content: str) -> TResponseInputItem:\n return {\n \"content\": content,\n \"role\": \"user\",\n }\ndef get_text_message(content: str) -> ResponseOutputItem:\n return ResponseOutputMessage(\n id=\"1\",\n type=\"message\",\n role=\"assistant\",", + "detail": "tests.test_responses", + "documentation": {} + }, + { + "label": "get_text_message", + "kind": 2, + "importPath": "tests.test_responses", + "description": "tests.test_responses", + "peekOfCode": "def get_text_message(content: str) -> ResponseOutputItem:\n return ResponseOutputMessage(\n id=\"1\",\n type=\"message\",\n role=\"assistant\",\n content=[ResponseOutputText(text=content, type=\"output_text\", annotations=[])],\n status=\"completed\",\n )\ndef get_function_tool(\n name: str | None = None, return_value: str | None = None, hide_errors: bool = False", + "detail": "tests.test_responses", + "documentation": {} + }, + { + "label": "get_function_tool", + "kind": 2, + "importPath": "tests.test_responses", + "description": "tests.test_responses", + "peekOfCode": "def get_function_tool(\n name: str | None = None, return_value: str | None = None, hide_errors: bool = False\n) -> FunctionTool:\n def _foo() -> str:\n return return_value or \"result_ok\"\n return function_tool(\n _foo,\n name_override=name,\n failure_error_function=None if hide_errors else default_tool_error_function,\n )", + "detail": "tests.test_responses", + "documentation": {} + }, + { + "label": "get_function_tool_call", + "kind": 2, + "importPath": "tests.test_responses", + "description": "tests.test_responses", + "peekOfCode": "def get_function_tool_call(\n name: str, arguments: str | None = None, call_id: str | None = None\n) -> ResponseOutputItem:\n return ResponseFunctionToolCall(\n id=\"1\",\n call_id=call_id or \"2\",\n type=\"function_call\",\n name=name,\n arguments=arguments or \"\",\n )", + "detail": "tests.test_responses", + "documentation": {} + }, + { + "label": "get_handoff_tool_call", + "kind": 2, + "importPath": "tests.test_responses", + "description": "tests.test_responses", + "peekOfCode": "def get_handoff_tool_call(\n to_agent: Agent[Any], override_name: str | None = None, args: str | None = None\n) -> ResponseOutputItem:\n name = override_name or Handoff.default_tool_name(to_agent)\n return get_function_tool_call(name, args)\ndef 
get_final_output_message(args: str) -> ResponseOutputItem:\n return ResponseOutputMessage(\n id=\"1\",\n type=\"message\",\n role=\"assistant\",", + "detail": "tests.test_responses", + "documentation": {} + }, + { + "label": "get_final_output_message", + "kind": 2, + "importPath": "tests.test_responses", + "description": "tests.test_responses", + "peekOfCode": "def get_final_output_message(args: str) -> ResponseOutputItem:\n return ResponseOutputMessage(\n id=\"1\",\n type=\"message\",\n role=\"assistant\",\n content=[ResponseOutputText(text=args, type=\"output_text\", annotations=[])],\n status=\"completed\",\n )", + "detail": "tests.test_responses", + "documentation": {} + }, + { + "label": "DummyTracing", + "kind": 6, + "importPath": "tests.test_responses_tracing", + "description": "tests.test_responses_tracing", + "peekOfCode": "class DummyTracing:\n def is_disabled(self):\n return False\nclass DummyUsage:\n def __init__(\n self,\n input_tokens: int = 1,\n input_tokens_details: Optional[InputTokensDetails] = None,\n output_tokens: int = 1,\n output_tokens_details: Optional[OutputTokensDetails] = None,", + "detail": "tests.test_responses_tracing", + "documentation": {} + }, + { + "label": "DummyUsage", + "kind": 6, + "importPath": "tests.test_responses_tracing", + "description": "tests.test_responses_tracing", + "peekOfCode": "class DummyUsage:\n def __init__(\n self,\n input_tokens: int = 1,\n input_tokens_details: Optional[InputTokensDetails] = None,\n output_tokens: int = 1,\n output_tokens_details: Optional[OutputTokensDetails] = None,\n total_tokens: int = 2,\n ):\n self.input_tokens = input_tokens", + "detail": "tests.test_responses_tracing", + "documentation": {} + }, + { + "label": "DummyResponse", + "kind": 6, + "importPath": "tests.test_responses_tracing", + "description": "tests.test_responses_tracing", + "peekOfCode": "class DummyResponse:\n def __init__(self):\n self.id = \"dummy-id\"\n self.output = []\n self.usage = DummyUsage()\n def __aiter__(self):\n yield ResponseCompletedEvent(\n type=\"response.completed\",\n response=fake_model.get_response_obj(self.output),\n sequence_number=0,", + "detail": "tests.test_responses_tracing", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_result_cast", + "description": "tests.test_result_cast", + "peekOfCode": "class Foo(BaseModel):\n bar: int\ndef test_result_cast_typechecks():\n \"\"\"Correct casts should work fine.\"\"\"\n result = create_run_result(1)\n assert result.final_output_as(int) == 1\n result = create_run_result(\"test\")\n assert result.final_output_as(str) == \"test\"\n result = create_run_result(Foo(bar=1))\n assert result.final_output_as(Foo) == Foo(bar=1)", + "detail": "tests.test_result_cast", + "documentation": {} + }, + { + "label": "create_run_result", + "kind": 2, + "importPath": "tests.test_result_cast", + "description": "tests.test_result_cast", + "peekOfCode": "def create_run_result(final_output: Any) -> RunResult:\n return RunResult(\n input=\"test\",\n new_items=[],\n raw_responses=[],\n final_output=final_output,\n input_guardrail_results=[],\n output_guardrail_results=[],\n _last_agent=Agent(name=\"test\"),\n context_wrapper=RunContextWrapper(context=None),", + "detail": "tests.test_result_cast", + "documentation": {} + }, + { + "label": "test_result_cast_typechecks", + "kind": 2, + "importPath": "tests.test_result_cast", + "description": "tests.test_result_cast", + "peekOfCode": "def test_result_cast_typechecks():\n \"\"\"Correct casts should work fine.\"\"\"\n 
result = create_run_result(1)\n assert result.final_output_as(int) == 1\n result = create_run_result(\"test\")\n assert result.final_output_as(str) == \"test\"\n result = create_run_result(Foo(bar=1))\n assert result.final_output_as(Foo) == Foo(bar=1)\ndef test_bad_cast_doesnt_raise():\n \"\"\"Bad casts shouldn't error unless we ask for it.\"\"\"", + "detail": "tests.test_result_cast", + "documentation": {} + }, + { + "label": "test_bad_cast_doesnt_raise", + "kind": 2, + "importPath": "tests.test_result_cast", + "description": "tests.test_result_cast", + "peekOfCode": "def test_bad_cast_doesnt_raise():\n \"\"\"Bad casts shouldn't error unless we ask for it.\"\"\"\n result = create_run_result(1)\n result.final_output_as(str)\n result = create_run_result(\"test\")\n result.final_output_as(Foo)\ndef test_bad_cast_with_param_raises():\n \"\"\"Bad casts should raise a TypeError when we ask for it.\"\"\"\n result = create_run_result(1)\n with pytest.raises(TypeError):", + "detail": "tests.test_result_cast", + "documentation": {} + }, + { + "label": "test_bad_cast_with_param_raises", + "kind": 2, + "importPath": "tests.test_result_cast", + "description": "tests.test_result_cast", + "peekOfCode": "def test_bad_cast_with_param_raises():\n \"\"\"Bad casts should raise a TypeError when we ask for it.\"\"\"\n result = create_run_result(1)\n with pytest.raises(TypeError):\n result.final_output_as(str, raise_if_incorrect_type=True)\n result = create_run_result(\"test\")\n with pytest.raises(TypeError):\n result.final_output_as(Foo, raise_if_incorrect_type=True)\n result = create_run_result(Foo(bar=1))\n with pytest.raises(TypeError):", + "detail": "tests.test_result_cast", + "documentation": {} + }, + { + "label": "DummyProvider", + "kind": 6, + "importPath": "tests.test_run_config", + "description": "tests.test_run_config", + "peekOfCode": "class DummyProvider(ModelProvider):\n \"\"\"A simple model provider that always returns the same model, and\n records the model name it was asked to provide.\"\"\"\n def __init__(self, model_to_return: Model | None = None) -> None:\n self.last_requested: str | None = None\n self.model_to_return: Model = model_to_return or FakeModel()\n def get_model(self, model_name: str | None) -> Model:\n # record the requested model name and return our test model\n self.last_requested = model_name\n return self.model_to_return", + "detail": "tests.test_run_config", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_run_step_execution", + "description": "tests.test_run_step_execution", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_final_output_without_tool_runs_again():\n agent = Agent(name=\"test\", output_type=Foo, tools=[get_function_tool(\"tool_1\", \"result\")])\n response = ModelResponse(\n output=[get_function_tool_call(\"tool_1\")],\n usage=Usage(),\n response_id=None,\n )", + "detail": "tests.test_run_step_execution", + "documentation": {} + }, + { + "label": "assert_item_is_message", + "kind": 2, + "importPath": "tests.test_run_step_execution", + "description": "tests.test_run_step_execution", + "peekOfCode": "def assert_item_is_message(item: RunItem, text: str) -> None:\n assert isinstance(item, MessageOutputItem)\n assert item.raw_item.type == \"message\"\n assert item.raw_item.role == \"assistant\"\n assert item.raw_item.content[0].type == \"output_text\"\n assert item.raw_item.content[0].text == text\ndef assert_item_is_function_tool_call(\n item: RunItem, name: str, arguments: str | 
None = None\n) -> None:\n assert isinstance(item, ToolCallItem)", + "detail": "tests.test_run_step_execution", + "documentation": {} + }, + { + "label": "assert_item_is_function_tool_call", + "kind": 2, + "importPath": "tests.test_run_step_execution", + "description": "tests.test_run_step_execution", + "peekOfCode": "def assert_item_is_function_tool_call(\n item: RunItem, name: str, arguments: str | None = None\n) -> None:\n assert isinstance(item, ToolCallItem)\n assert item.raw_item.type == \"function_call\"\n assert item.raw_item.name == name\n assert not arguments or item.raw_item.arguments == arguments\ndef assert_item_is_function_tool_call_output(item: RunItem, output: str) -> None:\n assert isinstance(item, ToolCallOutputItem)\n assert item.raw_item[\"type\"] == \"function_call_output\"", + "detail": "tests.test_run_step_execution", + "documentation": {} + }, + { + "label": "assert_item_is_function_tool_call_output", + "kind": 2, + "importPath": "tests.test_run_step_execution", + "description": "tests.test_run_step_execution", + "peekOfCode": "def assert_item_is_function_tool_call_output(item: RunItem, output: str) -> None:\n assert isinstance(item, ToolCallOutputItem)\n assert item.raw_item[\"type\"] == \"function_call_output\"\n assert item.raw_item[\"output\"] == output\nasync def get_execute_result(\n agent: Agent[Any],\n response: ModelResponse,\n *,\n original_input: str | list[TResponseInputItem] | None = None,\n generated_items: list[RunItem] | None = None,", + "detail": "tests.test_run_step_execution", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_run_step_processing", + "description": "tests.test_run_step_processing", + "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_final_output_parsed_correctly():\n agent = Agent(name=\"test\", output_type=Foo)\n response = ModelResponse(\n output=[\n get_text_message(\"Hello, world!\"),\n get_final_output_message(Foo(bar=\"123\").model_dump_json()),\n ],", + "detail": "tests.test_run_step_processing", + "documentation": {} + }, + { + "label": "DummyComputer", + "kind": 6, + "importPath": "tests.test_run_step_processing", + "description": "tests.test_run_step_processing", + "peekOfCode": "class DummyComputer(Computer):\n \"\"\"Minimal computer implementation for testing.\"\"\"\n @property\n def environment(self):\n return \"mac\" # pragma: no cover\n @property\n def dimensions(self):\n return (0, 0) # pragma: no cover\n def screenshot(self) -> str:\n return \"\" # pragma: no cover", + "detail": "tests.test_run_step_processing", + "documentation": {} + }, + { + "label": "test_empty_response", + "kind": 2, + "importPath": "tests.test_run_step_processing", + "description": "tests.test_run_step_processing", + "peekOfCode": "def test_empty_response():\n agent = Agent(name=\"test\")\n response = ModelResponse(\n output=[],\n usage=Usage(),\n response_id=None,\n )\n result = RunImpl.process_model_response(\n agent=agent,\n response=response,", + "detail": "tests.test_run_step_processing", + "documentation": {} + }, + { + "label": "test_no_tool_calls", + "kind": 2, + "importPath": "tests.test_run_step_processing", + "description": "tests.test_run_step_processing", + "peekOfCode": "def test_no_tool_calls():\n agent = Agent(name=\"test\")\n response = ModelResponse(\n output=[get_text_message(\"Hello, world!\")],\n usage=Usage(),\n response_id=None,\n )\n result = RunImpl.process_model_response(\n agent=agent, response=response, output_schema=None, handoffs=[], 
all_tools=[]\n )", + "detail": "tests.test_run_step_processing", + "documentation": {} + }, + { + "label": "FakeRealtimeModel", + "kind": 6, + "importPath": "tests.test_session_exceptions", + "description": "tests.test_session_exceptions", + "peekOfCode": "class FakeRealtimeModel(RealtimeModel):\n \"\"\"Fake model for testing that forwards events to listeners.\"\"\"\n def __init__(self):\n self._listeners: list[RealtimeModelListener] = []\n self._events_to_send: list[RealtimeModelEvent] = []\n self._is_connected = False\n self._send_task: asyncio.Task[None] | None = None\n def set_next_events(self, events: list[RealtimeModelEvent]) -> None:\n \"\"\"Set events to be sent to listeners.\"\"\"\n self._events_to_send = events.copy()", + "detail": "tests.test_session_exceptions", + "documentation": {} + }, + { + "label": "TestSessionExceptions", + "kind": 6, + "importPath": "tests.test_session_exceptions", + "description": "tests.test_session_exceptions", + "peekOfCode": "class TestSessionExceptions:\n \"\"\"Test exception handling in RealtimeSession.\"\"\"\n @pytest.mark.asyncio\n async def test_end_to_end_exception_propagation_and_cleanup(\n self, fake_model: FakeRealtimeModel, fake_agent\n ):\n \"\"\"Test that exceptions are stored, trigger cleanup, and are raised in __aiter__.\"\"\"\n # Create test exception\n test_exception = ValueError(\"Test error\")\n exception_event = RealtimeModelExceptionEvent(", + "detail": "tests.test_session_exceptions", + "documentation": {} + }, + { + "label": "fake_agent", + "kind": 2, + "importPath": "tests.test_session_exceptions", + "description": "tests.test_session_exceptions", + "peekOfCode": "def fake_agent():\n \"\"\"Create a fake agent for testing.\"\"\"\n agent = Mock()\n agent.get_all_tools = AsyncMock(return_value=[])\n return agent\n@pytest.fixture\ndef fake_model():\n \"\"\"Create a fake model for testing.\"\"\"\n return FakeRealtimeModel()\nclass TestSessionExceptions:", + "detail": "tests.test_session_exceptions", + "documentation": {} + }, + { + "label": "fake_model", + "kind": 2, + "importPath": "tests.test_session_exceptions", + "description": "tests.test_session_exceptions", + "peekOfCode": "def fake_model():\n \"\"\"Create a fake model for testing.\"\"\"\n return FakeRealtimeModel()\nclass TestSessionExceptions:\n \"\"\"Test exception handling in RealtimeSession.\"\"\"\n @pytest.mark.asyncio\n async def test_end_to_end_exception_propagation_and_cleanup(\n self, fake_model: FakeRealtimeModel, fake_agent\n ):\n \"\"\"Test that exceptions are stored, trigger cleanup, and are raised in __aiter__.\"\"\"", + "detail": "tests.test_session_exceptions", + "documentation": {} + }, + { + "label": "test_empty_schema_has_additional_properties_false", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_empty_schema_has_additional_properties_false():\n strict_schema = ensure_strict_json_schema({})\n assert strict_schema[\"additionalProperties\"] is False\ndef test_non_dict_schema_errors():\n with pytest.raises(TypeError):\n ensure_strict_json_schema([]) # type: ignore\ndef test_object_without_additional_properties():\n # When an object type schema has properties but no additionalProperties,\n # it should be added and the \"required\" list set from the property keys.\n schema = {\"type\": \"object\", \"properties\": {\"a\": {\"type\": \"string\"}}}", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_non_dict_schema_errors", + "kind": 2, + 
"importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_non_dict_schema_errors():\n with pytest.raises(TypeError):\n ensure_strict_json_schema([]) # type: ignore\ndef test_object_without_additional_properties():\n # When an object type schema has properties but no additionalProperties,\n # it should be added and the \"required\" list set from the property keys.\n schema = {\"type\": \"object\", \"properties\": {\"a\": {\"type\": \"string\"}}}\n result = ensure_strict_json_schema(schema)\n assert result[\"type\"] == \"object\"\n assert result[\"additionalProperties\"] is False", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_object_without_additional_properties", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_object_without_additional_properties():\n # When an object type schema has properties but no additionalProperties,\n # it should be added and the \"required\" list set from the property keys.\n schema = {\"type\": \"object\", \"properties\": {\"a\": {\"type\": \"string\"}}}\n result = ensure_strict_json_schema(schema)\n assert result[\"type\"] == \"object\"\n assert result[\"additionalProperties\"] is False\n assert result[\"required\"] == [\"a\"]\n # The inner property remains unchanged (no additionalProperties is added for non-object types)\n assert result[\"properties\"][\"a\"] == {\"type\": \"string\"}", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_object_with_true_additional_properties", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_object_with_true_additional_properties():\n # If additionalProperties is explicitly set to True for an object, a UserError should be raised.\n schema = {\n \"type\": \"object\",\n \"properties\": {\"a\": {\"type\": \"number\"}},\n \"additionalProperties\": True,\n }\n with pytest.raises(UserError):\n ensure_strict_json_schema(schema)\ndef test_array_items_processing_and_default_removal():", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_array_items_processing_and_default_removal", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_array_items_processing_and_default_removal():\n # When processing an array, the items schema is processed recursively.\n # Also, any \"default\": None should be removed.\n schema = {\n \"type\": \"array\",\n \"items\": {\"type\": \"number\", \"default\": None},\n }\n result = ensure_strict_json_schema(schema)\n # \"default\" should be stripped from the items schema.\n assert \"default\" not in result[\"items\"]", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_anyOf_processing", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_anyOf_processing():\n # Test that anyOf schemas are processed.\n schema = {\n \"anyOf\": [\n {\"type\": \"object\", \"properties\": {\"a\": {\"type\": \"string\"}}},\n {\"type\": \"number\", \"default\": None},\n ]\n }\n result = ensure_strict_json_schema(schema)\n # For the first variant: object type should get additionalProperties and required keys set.", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_allOf_single_entry_merging", 
+ "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_allOf_single_entry_merging():\n # When an allOf list has a single entry, its content should be merged into the parent.\n schema = {\n \"type\": \"object\",\n \"allOf\": [{\"properties\": {\"a\": {\"type\": \"boolean\"}}}],\n }\n result = ensure_strict_json_schema(schema)\n # allOf should be removed and merged.\n assert \"allOf\" not in result\n # The object should now have additionalProperties set and required set.", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_default_removal_on_non_object", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_default_removal_on_non_object():\n # Test that \"default\": None is stripped from schemas that are not objects.\n schema = {\"type\": \"string\", \"default\": None}\n result = ensure_strict_json_schema(schema)\n assert result[\"type\"] == \"string\"\n assert \"default\" not in result\ndef test_ref_expansion():\n # Construct a schema with a definitions section and a property with a $ref.\n schema = {\n \"definitions\": {\"refObj\": {\"type\": \"string\", \"default\": None}},", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_ref_expansion", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_ref_expansion():\n # Construct a schema with a definitions section and a property with a $ref.\n schema = {\n \"definitions\": {\"refObj\": {\"type\": \"string\", \"default\": None}},\n \"type\": \"object\",\n \"properties\": {\"a\": {\"$ref\": \"#/definitions/refObj\", \"description\": \"desc\"}},\n }\n result = ensure_strict_json_schema(schema)\n a_schema = result[\"properties\"][\"a\"]\n # The $ref should be expanded so that the type is from the referenced definition,", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_ref_no_expansion_when_alone", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_ref_no_expansion_when_alone():\n # If the schema only contains a $ref key, it should not be expanded.\n schema = {\"$ref\": \"#/definitions/refObj\"}\n result = ensure_strict_json_schema(schema)\n # Because there is only one key, the $ref remains unchanged.\n assert result == {\"$ref\": \"#/definitions/refObj\"}\ndef test_invalid_ref_format():\n # A $ref that does not start with \"#/\" should trigger a ValueError when resolved.\n schema = {\"type\": \"object\", \"properties\": {\"a\": {\"$ref\": \"invalid\", \"description\": \"desc\"}}}\n with pytest.raises(ValueError):", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "test_invalid_ref_format", + "kind": 2, + "importPath": "tests.test_strict_schema", + "description": "tests.test_strict_schema", + "peekOfCode": "def test_invalid_ref_format():\n # A $ref that does not start with \"#/\" should trigger a ValueError when resolved.\n schema = {\"type\": \"object\", \"properties\": {\"a\": {\"$ref\": \"invalid\", \"description\": \"desc\"}}}\n with pytest.raises(ValueError):\n ensure_strict_json_schema(schema)", + "detail": "tests.test_strict_schema", + "documentation": {} + }, + { + "label": "TestToolChoiceReset", + "kind": 6, + "importPath": "tests.test_tool_choice_reset", + "description": 
"tests.test_tool_choice_reset", + "peekOfCode": "class TestToolChoiceReset:\n def test_should_reset_tool_choice_direct(self):\n \"\"\"\n Test the _should_reset_tool_choice method directly with various inputs\n to ensure it correctly identifies cases where reset is needed.\n \"\"\"\n agent = Agent(name=\"test_agent\")\n # Case 1: Empty tool use tracker should not change the \"None\" tool choice\n model_settings = ModelSettings(tool_choice=None)\n tracker = AgentToolUseTracker()", + "detail": "tests.test_tool_choice_reset", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_tool_converter", + "description": "tests.test_tool_converter", + "peekOfCode": "class Foo(BaseModel):\n a: str\n b: list[int]\ndef test_convert_handoff_tool():\n agent = Agent(name=\"test_1\", handoff_description=\"test_2\")\n handoff_obj = handoff(agent=agent)\n result = Converter.convert_handoff_tool(handoff_obj)\n assert result[\"type\"] == \"function\"\n assert result[\"function\"][\"name\"] == Handoff.default_tool_name(agent)\n assert result[\"function\"].get(\"description\") == Handoff.default_tool_description(agent)", + "detail": "tests.test_tool_converter", + "documentation": {} + }, + { + "label": "some_function", + "kind": 2, + "importPath": "tests.test_tool_converter", + "description": "tests.test_tool_converter", + "peekOfCode": "def some_function(a: str, b: list[int]) -> str:\n return \"hello\"\ndef test_to_openai_with_function_tool():\n some_function(a=\"foo\", b=[1, 2, 3])\n tool = function_tool(some_function)\n result = Converter.tool_to_openai(tool)\n assert result[\"type\"] == \"function\"\n assert result[\"function\"][\"name\"] == \"some_function\"\n params = result.get(\"function\", {}).get(\"parameters\")\n assert params is not None", + "detail": "tests.test_tool_converter", + "documentation": {} + }, + { + "label": "test_to_openai_with_function_tool", + "kind": 2, + "importPath": "tests.test_tool_converter", + "description": "tests.test_tool_converter", + "peekOfCode": "def test_to_openai_with_function_tool():\n some_function(a=\"foo\", b=[1, 2, 3])\n tool = function_tool(some_function)\n result = Converter.tool_to_openai(tool)\n assert result[\"type\"] == \"function\"\n assert result[\"function\"][\"name\"] == \"some_function\"\n params = result.get(\"function\", {}).get(\"parameters\")\n assert params is not None\n properties = params.get(\"properties\", {})\n assert isinstance(properties, dict)", + "detail": "tests.test_tool_converter", + "documentation": {} + }, + { + "label": "test_convert_handoff_tool", + "kind": 2, + "importPath": "tests.test_tool_converter", + "description": "tests.test_tool_converter", + "peekOfCode": "def test_convert_handoff_tool():\n agent = Agent(name=\"test_1\", handoff_description=\"test_2\")\n handoff_obj = handoff(agent=agent)\n result = Converter.convert_handoff_tool(handoff_obj)\n assert result[\"type\"] == \"function\"\n assert result[\"function\"][\"name\"] == Handoff.default_tool_name(agent)\n assert result[\"function\"].get(\"description\") == Handoff.default_tool_description(agent)\n params = result.get(\"function\", {}).get(\"parameters\")\n assert params is not None\n for key, value in handoff_obj.input_json_schema.items():", + "detail": "tests.test_tool_converter", + "documentation": {} + }, + { + "label": "test_tool_converter_hosted_tools_errors", + "kind": 2, + "importPath": "tests.test_tool_converter", + "description": "tests.test_tool_converter", + "peekOfCode": "def test_tool_converter_hosted_tools_errors():\n 
with pytest.raises(UserError):\n Converter.tool_to_openai(WebSearchTool())\n with pytest.raises(UserError):\n Converter.tool_to_openai(FileSearchTool(vector_store_ids=[\"abc\"], max_num_results=1))", + "detail": "tests.test_tool_converter", + "documentation": {} + }, + { + "label": "get_span", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def get_span(processor: TracingProcessor) -> SpanImpl[AgentSpanData]:\n \"\"\"Create a minimal agent span for testing processors.\"\"\"\n return SpanImpl(\n trace_id=\"test_trace_id\",\n span_id=\"test_span_id\",\n parent_id=None,\n processor=processor,\n span_data=AgentSpanData(name=\"test_agent\"),\n )\ndef get_trace(processor: TracingProcessor) -> TraceImpl:", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "get_trace", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def get_trace(processor: TracingProcessor) -> TraceImpl:\n \"\"\"Create a minimal trace.\"\"\"\n return TraceImpl(\n name=\"test_trace\",\n trace_id=\"test_trace_id\",\n group_id=\"test_session_id\",\n metadata={},\n processor=processor,\n )\n@pytest.fixture", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "mocked_exporter", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def mocked_exporter():\n exporter = MagicMock()\n exporter.export = MagicMock()\n return exporter\ndef test_batch_trace_processor_on_trace_start(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, schedule_delay=0.1)\n test_trace = get_trace(processor)\n processor.on_trace_start(test_trace)\n assert processor._queue.qsize() == 1, \"Trace should be added to the queue\"\n # Shutdown to clean up the worker thread", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_batch_trace_processor_on_trace_start", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_trace_processor_on_trace_start(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, schedule_delay=0.1)\n test_trace = get_trace(processor)\n processor.on_trace_start(test_trace)\n assert processor._queue.qsize() == 1, \"Trace should be added to the queue\"\n # Shutdown to clean up the worker thread\n processor.shutdown()\ndef test_batch_trace_processor_on_span_end(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, schedule_delay=0.1)\n test_span = get_span(processor)", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_batch_trace_processor_on_span_end", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_trace_processor_on_span_end(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, schedule_delay=0.1)\n test_span = get_span(processor)\n processor.on_span_end(test_span)\n assert processor._queue.qsize() == 1, \"Span should be added to the queue\"\n # Shutdown to clean up the worker thread\n processor.shutdown()\ndef test_batch_trace_processor_queue_full(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, max_queue_size=2, schedule_delay=0.1)\n # Fill the queue", + "detail": "tests.test_trace_processor", + 
"documentation": {} + }, + { + "label": "test_batch_trace_processor_queue_full", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_trace_processor_queue_full(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, max_queue_size=2, schedule_delay=0.1)\n # Fill the queue\n processor.on_trace_start(get_trace(processor))\n processor.on_trace_start(get_trace(processor))\n assert processor._queue.full() is True\n # Next item should not be queued\n processor.on_trace_start(get_trace(processor))\n assert processor._queue.qsize() == 2, \"Queue should not exceed max_queue_size\"\n processor.on_span_end(get_span(processor))", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_batch_processor_doesnt_enqueue_on_trace_end_or_span_start", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_processor_doesnt_enqueue_on_trace_end_or_span_start(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter)\n processor.on_trace_start(get_trace(processor))\n assert processor._queue.qsize() == 1, \"Trace should be queued\"\n processor.on_span_start(get_span(processor))\n assert processor._queue.qsize() == 1, \"Span should not be queued\"\n processor.on_span_end(get_span(processor))\n assert processor._queue.qsize() == 2, \"Span should be queued\"\n processor.on_trace_end(get_trace(processor))\n assert processor._queue.qsize() == 2, \"Nothing new should be queued\"", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_batch_trace_processor_force_flush", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_trace_processor_force_flush(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, max_batch_size=2, schedule_delay=5.0)\n processor.on_trace_start(get_trace(processor))\n processor.on_span_end(get_span(processor))\n processor.on_span_end(get_span(processor))\n processor.force_flush()\n # Ensure exporter.export was called with all items\n # Because max_batch_size=2, it may have been called multiple times\n total_exported = 0\n for call_args in mocked_exporter.export.call_args_list:", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_batch_trace_processor_shutdown_flushes", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_trace_processor_shutdown_flushes(mocked_exporter):\n processor = BatchTraceProcessor(exporter=mocked_exporter, schedule_delay=5.0)\n processor.on_trace_start(get_trace(processor))\n processor.on_span_end(get_span(processor))\n qsize_before = processor._queue.qsize()\n assert qsize_before == 2\n processor.shutdown()\n # Ensure everything was exported after shutdown\n total_exported = 0\n for call_args in mocked_exporter.export.call_args_list:", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_batch_trace_processor_scheduled_export", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_batch_trace_processor_scheduled_export(mocked_exporter):\n \"\"\"\n Tests that items are automatically exported when the schedule_delay expires.\n We mock time.time() so we can 
trigger the condition without waiting in real time.\n \"\"\"\n with patch(\"time.time\") as mock_time:\n base_time = 1000.0\n mock_time.return_value = base_time\n processor = BatchTraceProcessor(exporter=mocked_exporter, schedule_delay=1.0)\n processor.on_span_end(get_span(processor)) # queue size = 1", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "patched_time_sleep", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def patched_time_sleep():\n \"\"\"\n Fixture to replace time.sleep with a no-op to speed up tests\n that rely on retry/backoff logic.\n \"\"\"\n with patch(\"time.sleep\") as mock_sleep:\n yield mock_sleep\ndef mock_processor():\n processor = MagicMock()\n processor.on_trace_start = MagicMock()", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "mock_processor", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def mock_processor():\n processor = MagicMock()\n processor.on_trace_start = MagicMock()\n processor.on_span_end = MagicMock()\n return processor\n@patch(\"httpx.Client\")\ndef test_backend_span_exporter_no_items(mock_client):\n exporter = BackendSpanExporter(api_key=\"test_key\")\n exporter.export([])\n # No calls should be made if there are no items", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_no_items", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_no_items(mock_client):\n exporter = BackendSpanExporter(api_key=\"test_key\")\n exporter.export([])\n # No calls should be made if there are no items\n mock_client.return_value.post.assert_not_called()\n exporter.close()\n@patch(\"httpx.Client\")\ndef test_backend_span_exporter_no_api_key(mock_client):\n # Ensure that os.environ is empty (sometimes devs have the openai api key set in their env)\n with patch.dict(os.environ, {}, clear=True):", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_no_api_key", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_no_api_key(mock_client):\n # Ensure that os.environ is empty (sometimes devs have the openai api key set in their env)\n with patch.dict(os.environ, {}, clear=True):\n exporter = BackendSpanExporter(api_key=None)\n exporter.export([get_span(mock_processor())])\n # Should log an error and return without calling post\n mock_client.return_value.post.assert_not_called()\n exporter.close()\n@patch(\"httpx.Client\")\ndef test_backend_span_exporter_2xx_success(mock_client):", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_2xx_success", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_2xx_success(mock_client):\n mock_response = MagicMock()\n mock_response.status_code = 200\n mock_client.return_value.post.return_value = mock_response\n exporter = BackendSpanExporter(api_key=\"test_key\")\n exporter.export([get_span(mock_processor()), get_trace(mock_processor())])\n # Should have called post exactly once\n mock_client.return_value.post.assert_called_once()\n 
exporter.close()\n@patch(\"httpx.Client\")", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_4xx_client_error", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_4xx_client_error(mock_client):\n mock_response = MagicMock()\n mock_response.status_code = 400\n mock_response.text = \"Bad Request\"\n mock_client.return_value.post.return_value = mock_response\n exporter = BackendSpanExporter(api_key=\"test_key\")\n exporter.export([get_span(mock_processor())])\n # 4xx should not be retried\n mock_client.return_value.post.assert_called_once()\n exporter.close()", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_5xx_retry", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_5xx_retry(mock_client, patched_time_sleep):\n mock_response = MagicMock()\n mock_response.status_code = 500\n # Make post() return 500 every time\n mock_client.return_value.post.return_value = mock_response\n exporter = BackendSpanExporter(api_key=\"test_key\", max_retries=3, base_delay=0.1, max_delay=0.2)\n exporter.export([get_span(mock_processor())])\n # Should retry up to max_retries times\n assert mock_client.return_value.post.call_count == 3\n exporter.close()", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_request_error", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_request_error(mock_client, patched_time_sleep):\n # Make post() raise a RequestError each time\n mock_client.return_value.post.side_effect = httpx.RequestError(\"Network error\")\n exporter = BackendSpanExporter(api_key=\"test_key\", max_retries=2, base_delay=0.1, max_delay=0.2)\n exporter.export([get_span(mock_processor())])\n # Should retry up to max_retries times\n assert mock_client.return_value.post.call_count == 2\n exporter.close()\n@patch(\"httpx.Client\")\ndef test_backend_span_exporter_close(mock_client):", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "test_backend_span_exporter_close", + "kind": 2, + "importPath": "tests.test_trace_processor", + "description": "tests.test_trace_processor", + "peekOfCode": "def test_backend_span_exporter_close(mock_client):\n exporter = BackendSpanExporter(api_key=\"test_key\")\n exporter.close()\n # Ensure underlying http client is closed\n mock_client.return_value.close.assert_called_once()", + "detail": "tests.test_trace_processor", + "documentation": {} + }, + { + "label": "standard_span_checks", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def standard_span_checks(\n span: Span[Any], trace_id: str, parent_id: str | None, span_type: str\n) -> None:\n assert span.span_id is not None\n assert span.trace_id == trace_id\n assert span.parent_id == parent_id\n assert span.started_at is not None\n assert span.ended_at is not None\n assert span.span_data.type == span_type\ndef standard_trace_checks(trace: Trace, name_check: str | None = None) -> None:", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "standard_trace_checks", + "kind": 2, + "importPath": "tests.test_tracing", + "description": 
"tests.test_tracing", + "peekOfCode": "def standard_trace_checks(trace: Trace, name_check: str | None = None) -> None:\n assert trace.trace_id is not None\n if name_check:\n assert trace.name == name_check\n### TESTS\ndef simple_tracing():\n x = trace(\"test\")\n x.start()\n span_1 = agent_span(name=\"agent_1\", span_id=\"span_1\", parent=x)\n span_1.start()", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "simple_tracing", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def simple_tracing():\n x = trace(\"test\")\n x.start()\n span_1 = agent_span(name=\"agent_1\", span_id=\"span_1\", parent=x)\n span_1.start()\n span_1.finish()\n span_2 = custom_span(name=\"custom_1\", span_id=\"span_2\", parent=x)\n span_2.start()\n span_3 = custom_span(name=\"custom_2\", span_id=\"span_3\", parent=span_2)\n span_3.start()", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_simple_tracing", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_simple_tracing() -> None:\n simple_tracing()\n assert fetch_normalized_spans(keep_span_id=True) == snapshot(\n [\n {\n \"workflow_name\": \"test\",\n \"children\": [\n {\n \"type\": \"agent\",\n \"id\": \"span_1\",", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "ctxmanager_spans", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def ctxmanager_spans():\n with trace(workflow_name=\"test\", trace_id=\"trace_123\", group_id=\"456\"):\n with custom_span(name=\"custom_1\", span_id=\"span_1\"):\n with custom_span(name=\"custom_2\", span_id=\"span_1_inner\"):\n pass\n with custom_span(name=\"custom_2\", span_id=\"span_2\"):\n pass\ndef test_ctxmanager_spans() -> None:\n ctxmanager_spans()\n assert fetch_normalized_spans(keep_span_id=True) == snapshot(", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_ctxmanager_spans", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_ctxmanager_spans() -> None:\n ctxmanager_spans()\n assert fetch_normalized_spans(keep_span_id=True) == snapshot(\n [\n {\n \"workflow_name\": \"test\",\n \"group_id\": \"456\",\n \"children\": [\n {\n \"type\": \"custom\",", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "spans_with_setters", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def spans_with_setters():\n with trace(workflow_name=\"test\", trace_id=\"trace_123\", group_id=\"456\"):\n with agent_span(name=\"agent_1\") as span_a:\n span_a.span_data.name = \"agent_2\"\n with function_span(name=\"function_1\") as span_b:\n span_b.span_data.input = \"i\"\n span_b.span_data.output = \"o\"\n with generation_span() as span_c:\n span_c.span_data.input = [{\"foo\": \"bar\"}]\n with handoff_span(from_agent=\"agent_1\", to_agent=\"agent_2\"):", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_spans_with_setters", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_spans_with_setters() -> None:\n spans_with_setters()\n assert fetch_normalized_spans() == snapshot(\n [\n {\n \"workflow_name\": \"test\",\n \"group_id\": \"456\",\n \"children\": [\n {\n \"type\": \"agent\",", + "detail": "tests.test_tracing", + 
"documentation": {} + }, + { + "label": "disabled_tracing", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def disabled_tracing():\n with trace(workflow_name=\"test\", trace_id=\"123\", group_id=\"456\", disabled=True):\n with agent_span(name=\"agent_1\"):\n with function_span(name=\"function_1\"):\n pass\ndef test_disabled_tracing():\n disabled_tracing()\n assert_no_traces()\ndef enabled_trace_disabled_span():\n with trace(workflow_name=\"test\", trace_id=\"trace_123\"):", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_disabled_tracing", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_disabled_tracing():\n disabled_tracing()\n assert_no_traces()\ndef enabled_trace_disabled_span():\n with trace(workflow_name=\"test\", trace_id=\"trace_123\"):\n with agent_span(name=\"agent_1\"):\n with function_span(name=\"function_1\", disabled=True):\n with generation_span():\n pass\ndef test_enabled_trace_disabled_span():", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "enabled_trace_disabled_span", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def enabled_trace_disabled_span():\n with trace(workflow_name=\"test\", trace_id=\"trace_123\"):\n with agent_span(name=\"agent_1\"):\n with function_span(name=\"function_1\", disabled=True):\n with generation_span():\n pass\ndef test_enabled_trace_disabled_span():\n enabled_trace_disabled_span()\n assert fetch_normalized_spans() == snapshot(\n [", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_enabled_trace_disabled_span", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_enabled_trace_disabled_span():\n enabled_trace_disabled_span()\n assert fetch_normalized_spans() == snapshot(\n [\n {\n \"workflow_name\": \"test\",\n \"children\": [\n {\n \"type\": \"agent\",\n \"data\": {\"name\": \"agent_1\"},", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_start_and_end_called_manual", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_start_and_end_called_manual():\n simple_tracing()\n events = fetch_events()\n assert events == [\n \"trace_start\",\n \"span_start\", # span_1\n \"span_end\", # span_1\n \"span_start\", # span_2\n \"span_start\", # span_3\n \"span_end\", # span_3", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "test_start_and_end_called_ctxmanager", + "kind": 2, + "importPath": "tests.test_tracing", + "description": "tests.test_tracing", + "peekOfCode": "def test_start_and_end_called_ctxmanager():\n with trace(workflow_name=\"test\", trace_id=\"123\", group_id=\"456\"):\n with custom_span(name=\"custom_1\", span_id=\"span_1\"):\n with custom_span(name=\"custom_2\", span_id=\"span_1_inner\"):\n pass\n with custom_span(name=\"custom_2\", span_id=\"span_2\"):\n pass\n events = fetch_events()\n assert events == [\n \"trace_start\",", + "detail": "tests.test_tracing", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_tracing_errors", + "description": "tests.test_tracing_errors", + "peekOfCode": "class Foo(TypedDict):\n bar: str\n@pytest.mark.asyncio\nasync def test_multiple_final_output_doesnt_error():\n model = FakeModel(tracing_enabled=True)\n 
agent_1 = Agent(\n name=\"test\",\n model=model,\n output_type=Foo,\n )", + "detail": "tests.test_tracing_errors", + "documentation": {} + }, + { + "label": "guardrail_function", + "kind": 2, + "importPath": "tests.test_tracing_errors", + "description": "tests.test_tracing_errors", + "peekOfCode": "def guardrail_function(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=None,\n tripwire_triggered=True,\n )\n@pytest.mark.asyncio\nasync def test_guardrail_error():\n agent = Agent(", + "detail": "tests.test_tracing_errors", + "documentation": {} + }, + { + "label": "Foo", + "kind": 6, + "importPath": "tests.test_tracing_errors_streamed", + "description": "tests.test_tracing_errors_streamed", + "peekOfCode": "class Foo(TypedDict):\n bar: str\n@pytest.mark.asyncio\nasync def test_multiple_final_output_no_error():\n model = FakeModel(tracing_enabled=True)\n agent_1 = Agent(\n name=\"test\",\n model=model,\n output_type=Foo,\n )", + "detail": "tests.test_tracing_errors_streamed", + "documentation": {} + }, + { + "label": "input_guardrail_function", + "kind": 2, + "importPath": "tests.test_tracing_errors_streamed", + "description": "tests.test_tracing_errors_streamed", + "peekOfCode": "def input_guardrail_function(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=None,\n tripwire_triggered=True,\n )\n@pytest.mark.asyncio\nasync def test_input_guardrail_error():\n model = FakeModel()", + "detail": "tests.test_tracing_errors_streamed", + "documentation": {} + }, + { + "label": "output_guardrail_function", + "kind": 2, + "importPath": "tests.test_tracing_errors_streamed", + "description": "tests.test_tracing_errors_streamed", + "peekOfCode": "def output_guardrail_function(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=None,\n tripwire_triggered=True,\n )\n@pytest.mark.asyncio\nasync def test_output_guardrail_error():\n model = FakeModel()", + "detail": "tests.test_tracing_errors_streamed", + "documentation": {} + }, + { + "label": "test_usage_add_aggregates_all_fields", + "kind": 2, + "importPath": "tests.test_usage", + "description": "tests.test_usage", + "peekOfCode": "def test_usage_add_aggregates_all_fields():\n u1 = Usage(\n requests=1,\n input_tokens=10,\n input_tokens_details=InputTokensDetails(cached_tokens=3),\n output_tokens=20,\n output_tokens_details=OutputTokensDetails(reasoning_tokens=5),\n total_tokens=30,\n )\n u2 = Usage(", + "detail": "tests.test_usage", + "documentation": {} + }, + { + "label": "test_usage_add_aggregates_with_none_values", + "kind": 2, + "importPath": "tests.test_usage", + "description": "tests.test_usage", + "peekOfCode": "def test_usage_add_aggregates_with_none_values():\n u1 = Usage()\n u2 = Usage(\n requests=2,\n input_tokens=7,\n input_tokens_details=InputTokensDetails(cached_tokens=4),\n output_tokens=8,\n output_tokens_details=OutputTokensDetails(reasoning_tokens=6),\n total_tokens=15,\n )", + "detail": "tests.test_usage", + "documentation": {} + }, + { + "label": "mock_agent", + "kind": 2, + "importPath": "tests.test_visualization", + "description": "tests.test_visualization", + "peekOfCode": "def mock_agent():\n tool1 = Mock()\n tool1.name = \"Tool1\"\n tool2 = Mock()\n tool2.name = \"Tool2\"\n handoff1 = 
Mock(spec=Handoff)\n handoff1.agent_name = \"Handoff1\"\n agent = Mock(spec=Agent)\n agent.name = \"Agent1\"\n agent.tools = [tool1, tool2]", + "detail": "tests.test_visualization", + "documentation": {} + }, + { + "label": "test_get_main_graph", + "kind": 2, + "importPath": "tests.test_visualization", + "description": "tests.test_visualization", + "peekOfCode": "def test_get_main_graph(mock_agent):\n result = get_main_graph(mock_agent)\n print(result)\n assert \"digraph G\" in result\n assert \"graph [splines=true];\" in result\n assert 'node [fontname=\"Arial\"];' in result\n assert \"edge [penwidth=1.5];\" in result\n assert (\n '\"__start__\" [label=\"__start__\", shape=ellipse, style=filled, '\n \"fillcolor=lightblue, width=0.5, height=0.3];\" in result", + "detail": "tests.test_visualization", + "documentation": {} + }, + { + "label": "test_get_all_nodes", + "kind": 2, + "importPath": "tests.test_visualization", + "description": "tests.test_visualization", + "peekOfCode": "def test_get_all_nodes(mock_agent):\n result = get_all_nodes(mock_agent)\n assert (\n '\"__start__\" [label=\"__start__\", shape=ellipse, style=filled, '\n \"fillcolor=lightblue, width=0.5, height=0.3];\" in result\n )\n assert (\n '\"__end__\" [label=\"__end__\", shape=ellipse, style=filled, '\n \"fillcolor=lightblue, width=0.5, height=0.3];\" in result\n )", + "detail": "tests.test_visualization", + "documentation": {} + }, + { + "label": "test_get_all_edges", + "kind": 2, + "importPath": "tests.test_visualization", + "description": "tests.test_visualization", + "peekOfCode": "def test_get_all_edges(mock_agent):\n result = get_all_edges(mock_agent)\n assert '\"__start__\" -> \"Agent1\";' in result\n assert '\"Agent1\" -> \"__end__\";'\n assert '\"Agent1\" -> \"Tool1\" [style=dotted, penwidth=1.5];' in result\n assert '\"Tool1\" -> \"Agent1\" [style=dotted, penwidth=1.5];' in result\n assert '\"Agent1\" -> \"Tool2\" [style=dotted, penwidth=1.5];' in result\n assert '\"Tool2\" -> \"Agent1\" [style=dotted, penwidth=1.5];' in result\n assert '\"Agent1\" -> \"Handoff1\";' in result\ndef test_draw_graph(mock_agent):", + "detail": "tests.test_visualization", + "documentation": {} + }, + { + "label": "test_draw_graph", + "kind": 2, + "importPath": "tests.test_visualization", + "description": "tests.test_visualization", + "peekOfCode": "def test_draw_graph(mock_agent):\n graph = draw_graph(mock_agent)\n assert isinstance(graph, graphviz.Source)\n assert \"digraph G\" in graph.source\n assert \"graph [splines=true];\" in graph.source\n assert 'node [fontname=\"Arial\"];' in graph.source\n assert \"edge [penwidth=1.5];\" in graph.source\n assert (\n '\"__start__\" [label=\"__start__\", shape=ellipse, style=filled, '\n \"fillcolor=lightblue, width=0.5, height=0.3];\" in graph.source", + "detail": "tests.test_visualization", + "documentation": {} + }, + { + "label": "test_cycle_detection", + "kind": 2, + "importPath": "tests.test_visualization", + "description": "tests.test_visualization", + "peekOfCode": "def test_cycle_detection():\n agent_a = Agent(name=\"A\")\n agent_b = Agent(name=\"B\")\n agent_a.handoffs.append(agent_b)\n agent_b.handoffs.append(agent_a)\n nodes = get_all_nodes(agent_a)\n edges = get_all_edges(agent_a)\n assert nodes.count('\"A\" [label=\"A\"') == 1\n assert nodes.count('\"B\" [label=\"B\"') == 1\n assert '\"A\" -> \"B\"' in edges", + "detail": "tests.test_visualization", + "documentation": {} + }, + { + "label": "SpanProcessorForTests", + "kind": 6, + "importPath": "tests.testing_processor", + 
"description": "tests.testing_processor", + "peekOfCode": "class SpanProcessorForTests(TracingProcessor):\n \"\"\"\n A simple processor that stores finished spans in memory.\n This is thread-safe and suitable for tests or basic usage.\n \"\"\"\n def __init__(self) -> None:\n self._lock = threading.Lock()\n # Dictionary of trace_id -> list of spans\n self._spans: list[Span[Any]] = []\n self._traces: list[Trace] = []", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "fetch_ordered_spans", + "kind": 2, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "def fetch_ordered_spans() -> list[Span[Any]]:\n return SPAN_PROCESSOR_TESTING.get_ordered_spans()\ndef fetch_traces() -> list[Trace]:\n return SPAN_PROCESSOR_TESTING.get_traces()\ndef fetch_events() -> list[TestSpanProcessorEvent]:\n return SPAN_PROCESSOR_TESTING._events\ndef assert_no_spans():\n spans = fetch_ordered_spans()\n if spans:\n raise AssertionError(f\"Expected 0 spans, got {len(spans)}\")", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "fetch_traces", + "kind": 2, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "def fetch_traces() -> list[Trace]:\n return SPAN_PROCESSOR_TESTING.get_traces()\ndef fetch_events() -> list[TestSpanProcessorEvent]:\n return SPAN_PROCESSOR_TESTING._events\ndef assert_no_spans():\n spans = fetch_ordered_spans()\n if spans:\n raise AssertionError(f\"Expected 0 spans, got {len(spans)}\")\ndef assert_no_traces():\n traces = fetch_traces()", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "fetch_events", + "kind": 2, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "def fetch_events() -> list[TestSpanProcessorEvent]:\n return SPAN_PROCESSOR_TESTING._events\ndef assert_no_spans():\n spans = fetch_ordered_spans()\n if spans:\n raise AssertionError(f\"Expected 0 spans, got {len(spans)}\")\ndef assert_no_traces():\n traces = fetch_traces()\n if traces:\n raise AssertionError(f\"Expected 0 traces, got {len(traces)}\")", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "assert_no_spans", + "kind": 2, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "def assert_no_spans():\n spans = fetch_ordered_spans()\n if spans:\n raise AssertionError(f\"Expected 0 spans, got {len(spans)}\")\ndef assert_no_traces():\n traces = fetch_traces()\n if traces:\n raise AssertionError(f\"Expected 0 traces, got {len(traces)}\")\n assert_no_spans()\ndef fetch_normalized_spans(", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "assert_no_traces", + "kind": 2, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "def assert_no_traces():\n traces = fetch_traces()\n if traces:\n raise AssertionError(f\"Expected 0 traces, got {len(traces)}\")\n assert_no_spans()\ndef fetch_normalized_spans(\n keep_span_id: bool = False, keep_trace_id: bool = False\n) -> list[dict[str, Any]]:\n nodes: dict[tuple[str, str | None], dict[str, Any]] = {}\n traces = []", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "fetch_normalized_spans", + "kind": 2, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "def fetch_normalized_spans(\n keep_span_id: bool = 
False, keep_trace_id: bool = False\n) -> list[dict[str, Any]]:\n nodes: dict[tuple[str, str | None], dict[str, Any]] = {}\n traces = []\n for trace_obj in fetch_traces():\n trace = trace_obj.export()\n assert trace\n assert trace.pop(\"object\") == \"trace\"\n assert trace[\"id\"].startswith(\"trace_\")", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "TestSpanProcessorEvent", + "kind": 5, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "TestSpanProcessorEvent = Literal[\"trace_start\", \"trace_end\", \"span_start\", \"span_end\"]\nclass SpanProcessorForTests(TracingProcessor):\n \"\"\"\n A simple processor that stores finished spans in memory.\n This is thread-safe and suitable for tests or basic usage.\n \"\"\"\n def __init__(self) -> None:\n self._lock = threading.Lock()\n # Dictionary of trace_id -> list of spans\n self._spans: list[Span[Any]] = []", + "detail": "tests.testing_processor", + "documentation": {} + }, + { + "label": "SPAN_PROCESSOR_TESTING", + "kind": 5, + "importPath": "tests.testing_processor", + "description": "tests.testing_processor", + "peekOfCode": "SPAN_PROCESSOR_TESTING = SpanProcessorForTests()\ndef fetch_ordered_spans() -> list[Span[Any]]:\n return SPAN_PROCESSOR_TESTING.get_ordered_spans()\ndef fetch_traces() -> list[Trace]:\n return SPAN_PROCESSOR_TESTING.get_traces()\ndef fetch_events() -> list[TestSpanProcessorEvent]:\n return SPAN_PROCESSOR_TESTING._events\ndef assert_no_spans():\n spans = fetch_ordered_spans()\n if spans:", + "detail": "tests.testing_processor", + "documentation": {} + } +] \ No newline at end of file diff --git a/examples/basic/agent_lifecycle_example.py b/examples/basic/agent_lifecycle_example.py index 671a69916..e556e3990 100644 --- a/examples/basic/agent_lifecycle_example.py +++ b/examples/basic/agent_lifecycle_example.py @@ -4,7 +4,7 @@ from pydantic import BaseModel -from agents import Agent, AgentHooks, RunContextWrapper, Runner, Tool, Action, function_tool +from agents import Action, Agent, AgentHooks, RunContextWrapper, Runner, Tool, function_tool class CustomAgentHooks(AgentHooks): diff --git a/examples/basic/lifecycle_example.py b/examples/basic/lifecycle_example.py index 706402d4d..7c6371683 100644 --- a/examples/basic/lifecycle_example.py +++ b/examples/basic/lifecycle_example.py @@ -4,7 +4,7 @@ from pydantic import BaseModel -from agents import Agent, RunContextWrapper, RunHooks, Runner, Tool, Usage, function_tool, Action +from agents import Action, Agent, RunContextWrapper, RunHooks, Runner, Tool, Usage, function_tool class ExampleHooks(RunHooks): diff --git a/src/agents/__init__.py b/src/agents/__init__.py index e994c548a..f7fc4982f 100644 --- a/src/agents/__init__.py +++ b/src/agents/__init__.py @@ -58,6 +58,7 @@ StreamEvent, ) from .tool import ( + Action, CodeInterpreterTool, ComputerTool, FileSearchTool, @@ -75,7 +76,6 @@ WebSearchTool, default_tool_error_function, function_tool, - Action, ) from .tracing import ( AgentSpanData, diff --git a/src/agents/_run_impl.py b/src/agents/_run_impl.py index a7dd0a851..353d8a9ff 100644 --- a/src/agents/_run_impl.py +++ b/src/agents/_run_impl.py @@ -78,8 +78,8 @@ LocalShellTool, MCPToolApprovalRequest, Tool, - ToolRunFunction, ToolRunComputerAction, + ToolRunFunction, ) from .tool_context import ToolContext from .tracing import ( diff --git a/src/agents/lifecycle.py b/src/agents/lifecycle.py index 0c75f41c3..65ab8474b 100644 --- a/src/agents/lifecycle.py +++ b/src/agents/lifecycle.py @@ -4,7 
+4,7 @@
 from .agent import Agent, AgentBase
 from .run_context import RunContextWrapper, TContext
-from .tool import Tool, Action
+from .tool import Action, Tool
 
 TAgent = TypeVar("TAgent", bound=AgentBase, default=AgentBase)
 
diff --git a/src/agents/tool.py b/src/agents/tool.py
index 2018ec080..67ac224b6 100644
--- a/src/agents/tool.py
+++ b/src/agents/tool.py
@@ -6,6 +6,7 @@
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Callable, Literal, Union, overload
 
+from openai.types.responses import ResponseFunctionToolCall
 from openai.types.responses.file_search_tool_param import Filters, RankingOptions
 from openai.types.responses.response_computer_tool_call import (
     PendingSafetyCheck,
@@ -16,7 +17,6 @@
 from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
 from typing_extensions import Concatenate, NotRequired, ParamSpec, TypedDict
-from openai.types.responses import ResponseComputerToolCall, ResponseFunctionToolCall
 
 from . import _debug
 from .computer import AsyncComputer, Computer
diff --git a/tests/test_agent_hooks.py b/tests/test_agent_hooks.py
index 63e0177cd..935a50ce7 100644
--- a/tests/test_agent_hooks.py
+++ b/tests/test_agent_hooks.py
@@ -7,7 +7,7 @@
 import pytest
 from typing_extensions import TypedDict
 
-from agents.agent import Agent, Action
+from agents.agent import Action, Agent
 from agents.lifecycle import AgentHooks
 from agents.run import Runner
 from agents.run_context import RunContextWrapper, TContext
diff --git a/tests/test_computer_action.py b/tests/test_computer_action.py
index 5dd6a4dcb..dc343d59c 100644
--- a/tests/test_computer_action.py
+++ b/tests/test_computer_action.py
@@ -23,8 +23,8 @@
 )
 
 from agents import (
-    Agent,
     Action,
+    Agent,
     AgentHooks,
     AsyncComputer,
     Computer,
@@ -34,9 +34,8 @@
     RunHooks,
 )
 from agents._run_impl import ComputerAction, RunImpl
-from agents.tool import ToolRunComputerAction
 from agents.items import ToolCallOutputItem
-from agents.tool import ComputerToolSafetyCheckData
+from agents.tool import ComputerToolSafetyCheckData, ToolRunComputerAction
 
 class LoggingComputer(Computer):

From 1d8ba0663f6aaa9532425740d918c0c4553e91a6 Mon Sep 17 00:00:00 2001
From: Meng Yan
Date: Wed, 16 Jul 2025 10:56:08 +0800
Subject: [PATCH 3/4] fix: restore correct context parameter for function tool hooks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Function tools should use tool_context (ToolContext)
- Computer/shell tools use context_wrapper (RunContextWrapper)
- This maintains consistency with original codebase

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude
---
 src/agents/_run_impl.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/agents/_run_impl.py b/src/agents/_run_impl.py
index 353d8a9ff..44481c1ad 100644
--- a/src/agents/_run_impl.py
+++ b/src/agents/_run_impl.py
@@ -546,9 +546,9 @@ async def run_single_tool(action: ToolRunFunction) -> Any:
                     span_fn.span_data.input = tool_call.arguments
                 try:
                     _, _, result = await asyncio.gather(
-                        hooks.on_tool_start(context_wrapper, agent, action),
+                        hooks.on_tool_start(tool_context, agent, action),
                         (
-                            agent.hooks.on_tool_start(context_wrapper, agent, action)
+                            agent.hooks.on_tool_start(tool_context, agent, action)
                             if agent.hooks
                             else _coro.noop_coroutine()
                         ),
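For reference, a hook override written against the signature this series settles on might look like the sketch below. It is illustrative only and not part of the patch: it assumes, per the earlier commits in the series, that Action is re-exported from agents and that a function-tool run exposes the originating model call as action.tool_call and the resolved tool as action.function_tool. Per this commit, function tools receive a ToolContext (a RunContextWrapper subclass) as the context argument, while computer and shell tools receive a plain RunContextWrapper.

    from agents import Action, Agent, AgentHooks, RunContextWrapper

    class PrintingHooks(AgentHooks):
        # Runs before each tool invocation; `action` bundles the model's tool
        # call together with the tool object that will execute it.
        async def on_tool_start(
            self, context: RunContextWrapper, agent: Agent, action: Action
        ) -> None:
            # For function-tool runs, the tool name and the raw JSON arguments
            # from the model are both available on the action (hypothetical
            # hook class; attribute names taken from the diffs in this series).
            print(
                f"{agent.name} is calling {action.function_tool.name} "
                f"with {action.tool_call.arguments}"
            )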
From 019c5bb0a946dcba661159324868bd002ad04664 Mon Sep 17 00:00:00 2001
From: Meng Yan
Date: Wed, 16 Jul 2025 11:17:03 +0800
Subject: [PATCH 4/4] chore: remove VSCode configuration files from PR
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Remove .vscode/ directory and all IDE-specific configuration
- These files should not be included in the repository

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude
---
 .vscode/PythonImportHelper-v2-Completion.json | 22635 ----------------
 .vscode/launch.json                           |    14 -
 .vscode/settings.json                         |     7 -
 3 files changed, 22656 deletions(-)
 delete mode 100644 .vscode/PythonImportHelper-v2-Completion.json
 delete mode 100644 .vscode/launch.json
 delete mode 100644 .vscode/settings.json

diff --git a/.vscode/PythonImportHelper-v2-Completion.json b/.vscode/PythonImportHelper-v2-Completion.json
deleted file mode 100644
index 49b8882f6..000000000
--- a/.vscode/PythonImportHelper-v2-Completion.json
+++ /dev/null
@@ -1,22635 +0,0 @@
-[
-    {
-        "label": "runpy",
-        "kind": 6,
-        "isExtraImport": true,
-        "importPath": "runpy",
-        "description": "runpy",
-        "detail": "runpy",
-        "documentation": {}
-    },
-    {
"label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - 
"label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - 
"label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - 
"label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - "label": "annotations", - "importPath": "__future__", - "description": "__future__", - "isExtraImport": true, - "detail": "__future__", - "documentation": {} - }, - { - 
"label": "os", - "kind": 6, - "isExtraImport": true, - "importPath": "os", - "description": "os", - "detail": "os", - "documentation": {} - }, - { - "label": "site", - "kind": 6, - "isExtraImport": true, - "importPath": "site", - "description": "site", - "detail": "site", - "documentation": {} - }, - { - "label": "sys", - "kind": 6, - "isExtraImport": true, - "importPath": "sys", - "description": "sys", - "detail": "sys", - "documentation": {} - }, - { - "label": "argparse", - "kind": 6, - "isExtraImport": true, - "importPath": "argparse", - "description": "argparse", - "detail": "argparse", - "documentation": {} - }, - { - "label": "openai", - "kind": 6, - "isExtraImport": true, - "importPath": "openai", - "description": "openai", - "detail": "openai", - "documentation": {} - }, - { - "label": "OpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncStream", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NotGiven", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NotGiven", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncStream", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncStream", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "DefaultAsyncHttpxClient", - "importPath": "openai", - "description": "openai", - 
"isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "APIStatusError", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncStream", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NotGiven", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "DefaultAsyncHttpxClient", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "Omit", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "NOT_GIVEN", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "AsyncOpenAI", - "importPath": "openai", - "description": "openai", - "isExtraImport": true, - "detail": "openai", - "documentation": {} - }, - { - "label": "ThreadPoolExecutor", - "importPath": "concurrent.futures", - "description": "concurrent.futures", - "isExtraImport": true, - "detail": "concurrent.futures", - "documentation": {} - }, - { - "label": "asyncio", - "kind": 6, - "isExtraImport": true, - "importPath": "asyncio", - "description": "asyncio", - "detail": "asyncio", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ItemHelpers", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - 
"documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "FunctionToolResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolsToFinalOutputFunction", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolsToFinalOutputResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "input_guardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ItemHelpers", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", 
- "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "output_guardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ItemHelpers", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RawResponsesStreamEvent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentHooks", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - 
"documentation": {} - }, - { - "label": "Tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Action", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunHooks", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Action", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentOutputSchema", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentOutputSchemaBase", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - 
"documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GenerateDynamicPromptData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "SQLiteSession", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ItemHelpers", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ItemHelpers", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - 
"detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolCallItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolCallOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "WebSearchTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "custom_span", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "gen_trace_id", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffInputData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": 
true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffInputData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HostedMCPTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MCPToolApprovalFunctionResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MCPToolApprovalRequest", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HostedMCPTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "gen_trace_id", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - 
"isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "gen_trace_id", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "gen_trace_id", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "gen_trace_id", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_tracing_disabled", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_default_openai_api", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_default_openai_client", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_tracing_disabled", - 
"importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Model", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelProvider", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_tracing_disabled", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_tracing_disabled", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_tracing_disabled", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - 
"documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "WebSearchTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "custom_span", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "gen_trace_id", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "CodeInterpreterTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AsyncComputer", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Button", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ComputerTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Environment", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "FileSearchTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - 
"documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ImageGenerationTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "WebSearchTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunResultStreaming", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - 
"documentation": {} - }, - { - "label": "FunctionTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIResponsesModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Model", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentOutputSchema", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": 
true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Prompt", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffInputData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - 
}, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffInputData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Action", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentHooks", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AsyncComputer", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Computer", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ComputerTool", - "importPath": "agents", - "description": "agents", - 
"isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunHooks", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_default_openai_api", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_default_openai_client", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "set_default_openai_key", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffInputData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIResponsesModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentBase", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "FunctionTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": 
true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunHooks", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TContext", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "HandoffInputData", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ItemHelpers", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelResponse", 
- "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ReasoningItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MaxTurnsExceeded", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIProvider", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "generation_span", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentOutputSchema", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Computer", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ComputerTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "FileSearchTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "WebSearchTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": 
"agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentOutputSchema", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "AgentOutputSchemaBase", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "run_demo_loop", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "FunctionTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "default_tool_error_function", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OpenAIResponsesModel", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - 
"importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MaxTurnsExceeded", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunErrorDetails", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunHooks", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolCallItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolCallOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - 
{ - "label": "Computer", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ComputerTool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ReasoningItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolCallItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "SQLiteSession", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "handoff", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "FunctionToolResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": 
"agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolCallOutputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ToolsToFinalOutputResult", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MaxTurnsExceeded", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "InputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "MaxTurnsExceeded", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrail", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "OutputGuardrailTripwireTriggered", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents", - "description": 
"agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents", - "description": "agents", - "isExtraImport": true, - "detail": "agents", - "documentation": {} - }, - { - "label": "pydantic", - "kind": 6, - "isExtraImport": true, - "importPath": "pydantic", - "description": "pydantic", - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "Field", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "Field", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - 
"isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "ConfigDict", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "Field", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "TypeAdapter", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "TypeAdapter", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "ValidationError", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "TypeAdapter", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "Field", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "create_model", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "TypeAdapter", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "GetCoreSchemaHandler", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "ValidationError", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "TypeAdapter", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "TypeAdapter", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - 
"description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "Field", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "ValidationError", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "BaseModel", - "importPath": "pydantic", - "description": "pydantic", - "isExtraImport": true, - "detail": "pydantic", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": 
"Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "overload", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Optional", - "importPath": "typing", - 
"description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Protocol", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "runtime_checkable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "overload", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "overload", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - 
"isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Annotated", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - 
"documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - 
"importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "get_args", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "get_origin", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "get_type_hints", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - 
"description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "overload", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "overload", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Annotated", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": 
true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Generic", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "TYPE_CHECKING", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Callable", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Union", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "overload", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - 
"label": "Optional", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "get_args", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Optional", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - 
"detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Optional", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "cast", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Any", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing", - "description": "typing", - "isExtraImport": true, - "detail": "typing", - "documentation": {} - }, - { - "label": "dataclasses", - "kind": 6, - "isExtraImport": true, - "importPath": "dataclasses", - "description": "dataclasses", - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": 
"dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - 
"description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "fields", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "replace", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "fields", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "dataclass", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "field", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "fields", - "importPath": "dataclasses", - "description": "dataclasses", - "isExtraImport": true, - "detail": "dataclasses", - "documentation": {} - }, - { - "label": "json", - "kind": 6, - "isExtraImport": true, - "importPath": "json", - "description": "json", - "detail": "json", - "documentation": {} - }, - { - "label": "uuid", - "kind": 6, - "isExtraImport": true, - "importPath": "uuid", - "description": "uuid", - "detail": "uuid", - "documentation": {} - }, - { - "label": "ResponseContentPartDoneEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseTextDeltaEvent", - 
"importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseTextDeltaEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseTextDeltaEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "EasyInputMessageParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFileSearchToolCallParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCallParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseInputContentParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseInputFileParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseInputImageParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseInputTextParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessageParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseReasoningItem", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - 
"isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCompletedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseContentPartAddedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseContentPartDoneEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCreatedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionCallArgumentsDeltaEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputItem", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputItemAddedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputItemDoneEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseReasoningItem", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseReasoningSummaryPartAddedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseReasoningSummaryPartDoneEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": 
"openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseReasoningSummaryTextDeltaEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseRefusalDeltaEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseTextDeltaEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseUsage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCompletedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseIncludable", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseStreamEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseTextConfigParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ToolParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "WebSearchToolParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "response_create_params", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFileSearchToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionWebSearch", - "importPath": 
"openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFileSearchToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionWebSearch", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseInputItemParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputItem", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseStreamEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseIncludable", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCompletedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": 
"openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCompletedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCompletedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseUsage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": 
"openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCallParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseInputTextParam", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "Response", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": 
"ResponseReasoningItem", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputItem", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseCompletedEvent", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFileSearchToolCall", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "ResponseFunctionWebSearch", - "importPath": "openai.types.responses", - "description": "openai.types.responses", - "isExtraImport": true, - "detail": "openai.types.responses", - "documentation": {} - }, - { - "label": "random", - "kind": 6, - "isExtraImport": true, - "importPath": "random", - "description": "random", - "detail": "random", - "documentation": {} - }, - { - "label": "base64", - "kind": 6, - "isExtraImport": true, - "importPath": "base64", - "description": "base64", - "detail": "base64", - "documentation": {} - }, - { - "label": "RECOMMENDED_PROMPT_PREFIX", - "importPath": "agents.extensions.handoff_prompt", - "description": "agents.extensions.handoff_prompt", - "isExtraImport": true, - "detail": "agents.extensions.handoff_prompt", - "documentation": {} - }, - { - "label": "prompt_with_handoff_instructions", - "importPath": "agents.extensions.handoff_prompt", - "description": "agents.extensions.handoff_prompt", - "isExtraImport": true, - "detail": "agents.extensions.handoff_prompt", - "documentation": {} - }, - { - "label": "prompt_with_handoff_instructions", - "importPath": "agents.extensions.handoff_prompt", - "description": "agents.extensions.handoff_prompt", - "isExtraImport": true, - "detail": "agents.extensions.handoff_prompt", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - 
"isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "MCPToolChoice", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "importPath": "agents.model_settings", - "description": "agents.model_settings", - "isExtraImport": true, - "detail": "agents.model_settings", - "documentation": {} - }, - { - "label": "time", - "kind": 6, - "isExtraImport": true, - "importPath": "time", - "description": "time", - "detail": "time", - "documentation": {} - }, - { - "label": "Sequence", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Coroutine", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Iterable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - 
"documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Mapping", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Sequence", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Mapping", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Sequence", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Mapping", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": 
"collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Awaitable", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Mapping", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "AsyncIterator", - "importPath": "collections.abc", - "description": "collections.abc", - "isExtraImport": true, - "detail": "collections.abc", - "documentation": {} - }, - { - "label": "Console", - "importPath": "rich.console", - "description": "rich.console", - "isExtraImport": true, - "detail": "rich.console", - "documentation": {} - }, - { - "label": "Console", - "importPath": "rich.console", - "description": "rich.console", - "isExtraImport": true, - "detail": "rich.console", - "documentation": {} - }, - { - "label": "Group", - "importPath": "rich.console", - "description": "rich.console", - "isExtraImport": true, - "detail": "rich.console", - "documentation": {} - }, - { - "label": "Console", - "importPath": "rich.console", - "description": "rich.console", - "isExtraImport": true, - "detail": "rich.console", - "documentation": {} - }, - { - "label": "Console", - "importPath": "rich.console", - "description": "rich.console", - "isExtraImport": true, - "detail": "rich.console", - "documentation": {} - }, - { - "label": "Group", - "importPath": "rich.console", - "description": "rich.console", - "isExtraImport": true, - "detail": "rich.console", - "documentation": {} - }, - { - "label": "Live", - "importPath": "rich.live", - "description": "rich.live", - "isExtraImport": true, - "detail": "rich.live", - "documentation": {} - }, - { - "label": "Live", - "importPath": "rich.live", - "description": "rich.live", - "isExtraImport": true, - "detail": "rich.live", - "documentation": {} - }, - { - "label": "Spinner", - "importPath": "rich.spinner", - "description": "rich.spinner", - "isExtraImport": true, - "detail": "rich.spinner", - "documentation": {} - }, - { - "label": "Spinner", - "importPath": 
"rich.spinner", - "description": "rich.spinner", - "isExtraImport": true, - "detail": "rich.spinner", - "documentation": {} - }, - { - "label": "handoff_filters", - "importPath": "agents.extensions", - "description": "agents.extensions", - "isExtraImport": true, - "detail": "agents.extensions", - "documentation": {} - }, - { - "label": "handoff_filters", - "importPath": "agents.extensions", - "description": "agents.extensions", - "isExtraImport": true, - "detail": "agents.extensions", - "documentation": {} - }, - { - "label": "shutil", - "kind": 6, - "isExtraImport": true, - "importPath": "shutil", - "description": "shutil", - "detail": "shutil", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerStdio", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerStdio", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerStreamableHttp", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerSse", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerStreamableHttp", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerStdio", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServerStdio", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPUtil", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "MCPServer", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "ToolFilterContext", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - 
"documentation": {} - }, - { - "label": "create_static_tool_filter", - "importPath": "agents.mcp", - "description": "agents.mcp", - "isExtraImport": true, - "detail": "agents.mcp", - "documentation": {} - }, - { - "label": "subprocess", - "kind": 6, - "isExtraImport": true, - "importPath": "subprocess", - "description": "subprocess", - "detail": "subprocess", - "documentation": {} - }, - { - "label": "FastMCP", - "importPath": "mcp.server.fastmcp", - "description": "mcp.server.fastmcp", - "isExtraImport": true, - "detail": "mcp.server.fastmcp", - "documentation": {} - }, - { - "label": "FastMCP", - "importPath": "mcp.server.fastmcp", - "description": "mcp.server.fastmcp", - "isExtraImport": true, - "detail": "mcp.server.fastmcp", - "documentation": {} - }, - { - "label": "FastMCP", - "importPath": "mcp.server.fastmcp", - "description": "mcp.server.fastmcp", - "isExtraImport": true, - "detail": "mcp.server.fastmcp", - "documentation": {} - }, - { - "label": "requests", - "kind": 6, - "isExtraImport": true, - "importPath": "requests", - "description": "requests", - "detail": "requests", - "documentation": {} - }, - { - "label": "LitellmModel", - "importPath": "agents.extensions.models.litellm_model", - "description": "agents.extensions.models.litellm_model", - "isExtraImport": true, - "detail": "agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "LitellmModel", - "importPath": "agents.extensions.models.litellm_model", - "description": "agents.extensions.models.litellm_model", - "isExtraImport": true, - "detail": "agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "LitellmModel", - "importPath": "agents.extensions.models.litellm_model", - "description": "agents.extensions.models.litellm_model", - "isExtraImport": true, - "detail": "agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "LitellmModel", - "importPath": "agents.extensions.models.litellm_model", - "description": "agents.extensions.models.litellm_model", - "isExtraImport": true, - "detail": "agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "LitellmModel", - "importPath": "agents.extensions.models.litellm_model", - "description": "agents.extensions.models.litellm_model", - "isExtraImport": true, - "detail": "agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "numpy", - "kind": 6, - "isExtraImport": true, - "importPath": "numpy", - "description": "numpy", - "detail": "numpy", - "documentation": {} - }, - { - "label": "RealtimeSession", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeAgent", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeRunner", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeSessionEvent", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeAgent", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeRunner", - "importPath": 
"agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeSession", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "RealtimeSessionEvent", - "importPath": "agents.realtime", - "description": "agents.realtime", - "isExtraImport": true, - "detail": "agents.realtime", - "documentation": {} - }, - { - "label": "sounddevice", - "kind": 6, - "isExtraImport": true, - "importPath": "sounddevice", - "description": "sounddevice", - "detail": "sounddevice", - "documentation": {} - }, - { - "label": "numpy.typing", - "kind": 6, - "isExtraImport": true, - "importPath": "numpy.typing", - "description": "numpy.typing", - "detail": "numpy.typing", - "documentation": {} - }, - { - "label": "events", - "importPath": "textual", - "description": "textual", - "isExtraImport": true, - "detail": "textual", - "documentation": {} - }, - { - "label": "events", - "importPath": "textual", - "description": "textual", - "isExtraImport": true, - "detail": "textual", - "documentation": {} - }, - { - "label": "App", - "importPath": "textual.app", - "description": "textual.app", - "isExtraImport": true, - "detail": "textual.app", - "documentation": {} - }, - { - "label": "ComposeResult", - "importPath": "textual.app", - "description": "textual.app", - "isExtraImport": true, - "detail": "textual.app", - "documentation": {} - }, - { - "label": "App", - "importPath": "textual.app", - "description": "textual.app", - "isExtraImport": true, - "detail": "textual.app", - "documentation": {} - }, - { - "label": "ComposeResult", - "importPath": "textual.app", - "description": "textual.app", - "isExtraImport": true, - "detail": "textual.app", - "documentation": {} - }, - { - "label": "Container", - "importPath": "textual.containers", - "description": "textual.containers", - "isExtraImport": true, - "detail": "textual.containers", - "documentation": {} - }, - { - "label": "Horizontal", - "importPath": "textual.containers", - "description": "textual.containers", - "isExtraImport": true, - "detail": "textual.containers", - "documentation": {} - }, - { - "label": "Container", - "importPath": "textual.containers", - "description": "textual.containers", - "isExtraImport": true, - "detail": "textual.containers", - "documentation": {} - }, - { - "label": "reactive", - "importPath": "textual.reactive", - "description": "textual.reactive", - "isExtraImport": true, - "detail": "textual.reactive", - "documentation": {} - }, - { - "label": "reactive", - "importPath": "textual.reactive", - "description": "textual.reactive", - "isExtraImport": true, - "detail": "textual.reactive", - "documentation": {} - }, - { - "label": "RichLog", - "importPath": "textual.widgets", - "description": "textual.widgets", - "isExtraImport": true, - "detail": "textual.widgets", - "documentation": {} - }, - { - "label": "Static", - "importPath": "textual.widgets", - "description": "textual.widgets", - "isExtraImport": true, - "detail": "textual.widgets", - "documentation": {} - }, - { - "label": "Button", - "importPath": "textual.widgets", - "description": "textual.widgets", - "isExtraImport": true, - "detail": "textual.widgets", - "documentation": {} - }, - { - "label": "RichLog", - "importPath": "textual.widgets", - "description": "textual.widgets", - "isExtraImport": true, - "detail": "textual.widgets", - "documentation": {} - }, - { - "label": 
"Static", - "importPath": "textual.widgets", - "description": "textual.widgets", - "isExtraImport": true, - "detail": "textual.widgets", - "documentation": {} - }, - { - "label": "override", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "override", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "assert_never", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "assert_never", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": 
{} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "Literal", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "get_args", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "get_origin", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - 
"documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "Unpack", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeVar", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeAlias", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypeGuard", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "Concatenate", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "NotRequired", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "ParamSpec", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": 
"typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "TypedDict", - "importPath": "typing_extensions", - "description": "typing_extensions", - "isExtraImport": true, - "detail": "typing_extensions", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "Model", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelTracing", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "Model", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "ModelProvider", - "importPath": "agents.models.interface", - "description": "agents.models.interface", - "isExtraImport": true, - "detail": "agents.models.interface", - "documentation": {} - }, - { - "label": "OpenAIProvider", - "importPath": "agents.models.openai_provider", - "description": "agents.models.openai_provider", - "isExtraImport": true, - "detail": "agents.models.openai_provider", - "documentation": {} - }, - { - "label": "OpenAIProvider", - "importPath": "agents.models.openai_provider", - "description": "agents.models.openai_provider", - "isExtraImport": true, - "detail": "agents.models.openai_provider", - "documentation": {} - }, - { - "label": "OpenAIProvider", - "importPath": "agents.models.openai_provider", - "description": "agents.models.openai_provider", - "isExtraImport": true, - "detail": "agents.models.openai_provider", - "documentation": {} - }, - { - "label": "OpenAIProvider", - "importPath": "agents.models.openai_provider", - "description": "agents.models.openai_provider", - "isExtraImport": true, - "detail": "agents.models.openai_provider", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "agents.types", - "description": "agents.types", - "isExtraImport": true, - 
"detail": "agents.types", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "agents.types", - "description": "agents.types", - "isExtraImport": true, - "detail": "agents.types", - "documentation": {} - }, - { - "label": "ReasoningItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseOutputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseStreamEvent", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseOutputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseStreamEvent", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "RunItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "ToolCallOutputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "HandoffOutputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "ToolCallOutputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "importPath": "agents.items", - "description": "agents.items", - "isExtraImport": true, - "detail": "agents.items", - "documentation": {} - }, - { - "label": "Browser", - "importPath": "playwright.async_api", - "description": "playwright.async_api", - "isExtraImport": true, - "detail": "playwright.async_api", - "documentation": {} - }, - { - "label": "Page", - "importPath": "playwright.async_api", - "description": "playwright.async_api", - "isExtraImport": true, - "detail": "playwright.async_api", - "documentation": {} - }, - { - "label": "Playwright", - 
"importPath": "playwright.async_api", - "description": "playwright.async_api", - "isExtraImport": true, - "detail": "playwright.async_api", - "documentation": {} - }, - { - "label": "async_playwright", - "importPath": "playwright.async_api", - "description": "playwright.async_api", - "isExtraImport": true, - "detail": "playwright.async_api", - "documentation": {} - }, - { - "label": "tempfile", - "kind": 6, - "isExtraImport": true, - "importPath": "tempfile", - "description": "tempfile", - "detail": "tempfile", - "documentation": {} - }, - { - "label": "AudioInput", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "SingleAgentVoiceWorkflow", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "SingleAgentWorkflowCallbacks", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "VoicePipeline", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "StreamedAudioInput", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "VoicePipeline", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "VoiceWorkflowBase", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "VoiceWorkflowHelper", - "importPath": "agents.voice", - "description": "agents.voice", - "isExtraImport": true, - "detail": "agents.voice", - "documentation": {} - }, - { - "label": "curses", - "kind": 6, - "isExtraImport": true, - "importPath": "curses", - "description": "curses", - "detail": "curses", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - 
"description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "InputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "OutputTokensDetails", - "importPath": "openai.types.responses.response_usage", - "description": "openai.types.responses.response_usage", - "isExtraImport": true, - "detail": "openai.types.responses.response_usage", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "AgentsException", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - 
"importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "importPath": "agents.exceptions", - "description": "agents.exceptions", - "isExtraImport": true, - "detail": "agents.exceptions", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessageToolCall", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionAssistantMessageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionContentPartImageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionContentPartParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionContentPartTextParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionDeveloperMessageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessageToolCallParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionSystemMessageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionToolChoiceOptionParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - 
"detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionToolMessageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionUserMessageParam", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletion", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessageToolCall", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletion", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat", - "description": "openai.types.chat", - "isExtraImport": true, - "detail": "openai.types.chat", - "documentation": {} - }, - { - "label": "Annotation", - "importPath": "openai.types.chat.chat_completion_message", - "description": "openai.types.chat.chat_completion_message", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message", - "documentation": {} - }, - { - "label": "AnnotationURLCitation", - "importPath": "openai.types.chat.chat_completion_message", - "description": "openai.types.chat.chat_completion_message", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat.chat_completion_message", - "description": "openai.types.chat.chat_completion_message", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat.chat_completion_message", - "description": "openai.types.chat.chat_completion_message", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat.chat_completion_message", - "description": "openai.types.chat.chat_completion_message", - "isExtraImport": true, - "detail": 
"openai.types.chat.chat_completion_message", - "documentation": {} - }, - { - "label": "ChatCompletionMessage", - "importPath": "openai.types.chat.chat_completion_message", - "description": "openai.types.chat.chat_completion_message", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message", - "documentation": {} - }, - { - "label": "Function", - "importPath": "openai.types.chat.chat_completion_message_tool_call", - "description": "openai.types.chat.chat_completion_message_tool_call", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message_tool_call", - "documentation": {} - }, - { - "label": "ChatCompletionMessageToolCall", - "importPath": "openai.types.chat.chat_completion_message_tool_call", - "description": "openai.types.chat.chat_completion_message_tool_call", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message_tool_call", - "documentation": {} - }, - { - "label": "Function", - "importPath": "openai.types.chat.chat_completion_message_tool_call", - "description": "openai.types.chat.chat_completion_message_tool_call", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message_tool_call", - "documentation": {} - }, - { - "label": "Function", - "importPath": "openai.types.chat.chat_completion_message_tool_call", - "description": "openai.types.chat.chat_completion_message_tool_call", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_message_tool_call", - "documentation": {} - }, - { - "label": "graphviz", - "kind": 6, - "isExtraImport": true, - "importPath": "graphviz", - "description": "graphviz", - "detail": "graphviz", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents.handoffs", - "description": "agents.handoffs", - "isExtraImport": true, - "detail": "agents.handoffs", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents.handoffs", - "description": "agents.handoffs", - "isExtraImport": true, - "detail": "agents.handoffs", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents.handoffs", - "description": "agents.handoffs", - "isExtraImport": true, - "detail": "agents.handoffs", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents.handoffs", - "description": "agents.handoffs", - "isExtraImport": true, - "detail": "agents.handoffs", - "documentation": {} - }, - { - "label": "Handoff", - "importPath": "agents.handoffs", - "description": "agents.handoffs", - "isExtraImport": true, - "detail": "agents.handoffs", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "FunctionTool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "FunctionTool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - 
"detail": "agents.tool", - "documentation": {} - }, - { - "label": "FunctionToolResult", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "ToolRunComputerAction", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "ComputerToolSafetyCheckData", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "default_tool_error_function", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "function_tool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "FileSearchTool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "WebSearchTool", - "importPath": "agents.tool", - "description": "agents.tool", - "isExtraImport": true, - "detail": "agents.tool", - "documentation": {} - }, - { - "label": "abc", - "kind": 6, - "isExtraImport": true, - "importPath": "abc", - "description": "abc", - "detail": "abc", - "documentation": {} - }, - { - "label": "ABC", - "importPath": "abc", - "description": "abc", - "isExtraImport": true, - "detail": "abc", - "documentation": {} - }, - { - "label": "abstractmethod", - "importPath": "abc", - "description": "abc", - "isExtraImport": true, - "detail": "abc", - "documentation": {} - }, - { - "label": "ABC", - "importPath": "abc", - "description": "abc", - "isExtraImport": true, - "detail": "abc", - "documentation": {} - }, - { - "label": "abstractmethod", - "importPath": "abc", - "description": "abc", - "isExtraImport": true, - "detail": "abc", - "documentation": {} - }, - { - "label": "inspect", - "kind": 6, - "isExtraImport": true, - "importPath": "inspect", - "description": "inspect", - "detail": "inspect", - "documentation": {} - }, - { - "label": "contextlib", - "kind": 6, - "isExtraImport": true, - "importPath": "contextlib", - "description": "contextlib", - "detail": "contextlib", - "documentation": {} - }, - { - "label": "AbstractAsyncContextManager", - "importPath": "contextlib", - "description": "contextlib", - "isExtraImport": true, - "detail": "contextlib", - "documentation": {} - }, - { - "label": "AsyncExitStack", - "importPath": "contextlib", - "description": "contextlib", - "isExtraImport": true, - "detail": "contextlib", - "documentation": {} - }, - { - "label": "timedelta", - "importPath": "datetime", - "description": "datetime", - "isExtraImport": true, - "detail": "datetime", - "documentation": {} - }, - { - "label": "datetime", - "importPath": "datetime", - "description": "datetime", - "isExtraImport": true, - "detail": "datetime", - "documentation": {} - }, - { - "label": "datetime", - "importPath": "datetime", - "description": "datetime", - "isExtraImport": true, - "detail": "datetime", - "documentation": {} - }, - { - "label": "timezone", - "importPath": "datetime", - "description": "datetime", - "isExtraImport": true, - "detail": "datetime", - 
"documentation": {} - }, - { - "label": "datetime", - "importPath": "datetime", - "description": "datetime", - "isExtraImport": true, - "detail": "datetime", - "documentation": {} - }, - { - "label": "datetime", - "importPath": "datetime", - "description": "datetime", - "isExtraImport": true, - "detail": "datetime", - "documentation": {} - }, - { - "label": "Path", - "importPath": "pathlib", - "description": "pathlib", - "isExtraImport": true, - "detail": "pathlib", - "documentation": {} - }, - { - "label": "Path", - "importPath": "pathlib", - "description": "pathlib", - "isExtraImport": true, - "detail": "pathlib", - "documentation": {} - }, - { - "label": "Path", - "importPath": "pathlib", - "description": "pathlib", - "isExtraImport": true, - "detail": "pathlib", - "documentation": {} - }, - { - "label": "MemoryObjectReceiveStream", - "importPath": "anyio.streams.memory", - "description": "anyio.streams.memory", - "isExtraImport": true, - "detail": "anyio.streams.memory", - "documentation": {} - }, - { - "label": "MemoryObjectSendStream", - "importPath": "anyio.streams.memory", - "description": "anyio.streams.memory", - "isExtraImport": true, - "detail": "anyio.streams.memory", - "documentation": {} - }, - { - "label": "ClientSession", - "importPath": "mcp", - "description": "mcp", - "isExtraImport": true, - "detail": "mcp", - "documentation": {} - }, - { - "label": "StdioServerParameters", - "importPath": "mcp", - "description": "mcp", - "isExtraImport": true, - "detail": "mcp", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "mcp", - "description": "mcp", - "isExtraImport": true, - "detail": "mcp", - "documentation": {} - }, - { - "label": "stdio_client", - "importPath": "mcp", - "description": "mcp", - "isExtraImport": true, - "detail": "mcp", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "mcp", - "description": "mcp", - "isExtraImport": true, - "detail": "mcp", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "mcp", - "description": "mcp", - "isExtraImport": true, - "detail": "mcp", - "documentation": {} - }, - { - "label": "sse_client", - "importPath": "mcp.client.sse", - "description": "mcp.client.sse", - "isExtraImport": true, - "detail": "mcp.client.sse", - "documentation": {} - }, - { - "label": "GetSessionIdCallback", - "importPath": "mcp.client.streamable_http", - "description": "mcp.client.streamable_http", - "isExtraImport": true, - "detail": "mcp.client.streamable_http", - "documentation": {} - }, - { - "label": "streamablehttp_client", - "importPath": "mcp.client.streamable_http", - "description": "mcp.client.streamable_http", - "isExtraImport": true, - "detail": "mcp.client.streamable_http", - "documentation": {} - }, - { - "label": "SessionMessage", - "importPath": "mcp.shared.message", - "description": "mcp.shared.message", - "isExtraImport": true, - "detail": "mcp.shared.message", - "documentation": {} - }, - { - "label": "CallToolResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "GetPromptResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "InitializeResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "ListPromptsResult", - "importPath": "mcp.types", - "description": "mcp.types", - 
"isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "CallToolResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "GetPromptResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "ListPromptsResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "PromptMessage", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "TextContent", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "ListToolsResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "ListToolsResult", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "Tool", - "importPath": "mcp.types", - "description": "mcp.types", - "isExtraImport": true, - "detail": "mcp.types", - "documentation": {} - }, - { - "label": "functools", - "kind": 6, - "isExtraImport": true, - "importPath": "functools", - "description": "functools", - "detail": "functools", - "documentation": {} - }, - { - "label": "cached_property", - "importPath": "functools", - "description": "functools", - "isExtraImport": true, - "detail": "functools", - "documentation": {} - }, - { - "label": "sqlite3", - "kind": 6, - "isExtraImport": true, - "importPath": "sqlite3", - "description": "sqlite3", - "detail": "sqlite3", - "documentation": {} - }, - { - "label": "threading", - "kind": 6, - "isExtraImport": true, - "importPath": "threading", - "description": "threading", - "detail": "threading", - "documentation": {} - }, - { - "label": "File", - "importPath": "openai.types.chat.chat_completion_content_part_param", - "description": "openai.types.chat.chat_completion_content_part_param", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_content_part_param", - "documentation": {} - }, - { - "label": "FileFile", - "importPath": "openai.types.chat.chat_completion_content_part_param", - "description": "openai.types.chat.chat_completion_content_part_param", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_content_part_param", - "documentation": {} - }, - { - "label": "ChatCompletionToolParam", - "importPath": "openai.types.chat.chat_completion_tool_param", - "description": "openai.types.chat.chat_completion_tool_param", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_tool_param", - "documentation": {} - }, - { - "label": "ResponseFormat", - "importPath": "openai.types.chat.completion_create_params", - "description": "openai.types.chat.completion_create_params", - "isExtraImport": true, - "detail": "openai.types.chat.completion_create_params", - "documentation": 
{} - }, - { - "label": "FunctionCallOutput", - "importPath": "openai.types.responses.response_input_param", - "description": "openai.types.responses.response_input_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_param", - "documentation": {} - }, - { - "label": "ItemReference", - "importPath": "openai.types.responses.response_input_param", - "description": "openai.types.responses.response_input_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_param", - "documentation": {} - }, - { - "label": "Message", - "importPath": "openai.types.responses.response_input_param", - "description": "openai.types.responses.response_input_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_param", - "documentation": {} - }, - { - "label": "ComputerCallOutput", - "importPath": "openai.types.responses.response_input_param", - "description": "openai.types.responses.response_input_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_param", - "documentation": {} - }, - { - "label": "McpApprovalResponse", - "importPath": "openai.types.responses.response_input_param", - "description": "openai.types.responses.response_input_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_param", - "documentation": {} - }, - { - "label": "Summary", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "Summary", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "ResponseReasoningItem", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "ResponseReasoningItem", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "ResponseReasoningItem", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "Summary", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "ResponseReasoningItem", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "Summary", - "importPath": "openai.types.responses.response_reasoning_item", - "description": "openai.types.responses.response_reasoning_item", - "isExtraImport": true, - "detail": 
"openai.types.responses.response_reasoning_item", - "documentation": {} - }, - { - "label": "CompletionUsage", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionUsage", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionUsage", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "PromptTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionUsage", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "PromptTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionUsage", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "PromptTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "CompletionUsage", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "PromptTokensDetails", - "importPath": "openai.types.completion_usage", - "description": "openai.types.completion_usage", - "isExtraImport": true, - "detail": "openai.types.completion_usage", - "documentation": {} - }, - { - "label": "Part", - "importPath": "openai.types.responses.response_reasoning_summary_part_added_event", - "description": "openai.types.responses.response_reasoning_summary_part_added_event", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_summary_part_added_event", - "documentation": {} - }, - { - "label": "Part", - "importPath": "openai.types.responses.response_reasoning_summary_part_done_event", - 
"description": "openai.types.responses.response_reasoning_summary_part_done_event", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_summary_part_done_event", - "documentation": {} - }, - { - "label": "enum", - "kind": 6, - "isExtraImport": true, - "importPath": "enum", - "description": "enum", - "detail": "enum", - "documentation": {} - }, - { - "label": "Enum", - "importPath": "enum", - "description": "enum", - "isExtraImport": true, - "detail": "enum", - "documentation": {} - }, - { - "label": "ResponsePromptParam", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "ResponsePromptParam", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "ResponsePromptParam", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "ResponsePromptParam", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "ResponsePromptParam", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "Variables", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "ResponsePromptParam", - "importPath": "openai.types.responses.response_prompt_param", - "description": "openai.types.responses.response_prompt_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_prompt_param", - "documentation": {} - }, - { - "label": "ChatModel", - "importPath": "openai.types", - "description": "openai.types", - "isExtraImport": true, - "detail": "openai.types", - "documentation": {} - }, - { - "label": "ChatModel", - "importPath": "openai.types", - "description": "openai.types", - "isExtraImport": true, - "detail": "openai.types", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "ChatCompletion", - "importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "ChatCompletion", - 
"importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "ChatCompletion", - "importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion", - "description": "openai.types.chat.chat_completion", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion", - "documentation": {} - }, - { - "label": "httpx", - "kind": 6, - "isExtraImport": true, - "importPath": "httpx", - "description": "httpx", - "detail": "httpx", - "documentation": {} - }, - { - "label": "ASGITransport", - "importPath": "httpx", - "description": "httpx", - "isExtraImport": true, - "detail": "httpx", - "documentation": {} - }, - { - "label": "AsyncClient", - "importPath": "httpx", - "description": "httpx", - "isExtraImport": true, - "detail": "httpx", - "documentation": {} - }, - { - "label": "websockets", - "kind": 6, - "isExtraImport": true, - "importPath": "websockets", - "description": "websockets", - "detail": "websockets", - "documentation": {} - }, - { - "label": "ConversationItem", - "importPath": "openai.types.beta.realtime.conversation_item", - "description": "openai.types.beta.realtime.conversation_item", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item", - "documentation": {} - }, - { - "label": "ConversationItem", - "importPath": "openai.types.beta.realtime.conversation_item", - "description": "openai.types.beta.realtime.conversation_item", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item", - "documentation": {} - }, - { - "label": "ConversationItem", - "importPath": "openai.types.beta.realtime.conversation_item", - "description": "openai.types.beta.realtime.conversation_item", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item", - "documentation": {} - }, - { - "label": "ConversationItem", - "importPath": "openai.types.beta.realtime.conversation_item", - "description": "openai.types.beta.realtime.conversation_item", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item", - "documentation": {} - }, - { - "label": "ConversationItemContent", - "importPath": "openai.types.beta.realtime.conversation_item_content", - "description": "openai.types.beta.realtime.conversation_item_content", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item_content", - "documentation": {} - }, - { - "label": "ConversationItemContent", - "importPath": "openai.types.beta.realtime.conversation_item_content", - "description": "openai.types.beta.realtime.conversation_item_content", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item_content", - "documentation": {} - }, - { - "label": "ConversationItemCreateEvent", - "importPath": "openai.types.beta.realtime.conversation_item_create_event", - "description": "openai.types.beta.realtime.conversation_item_create_event", - "isExtraImport": true, - "detail": 
"openai.types.beta.realtime.conversation_item_create_event", - "documentation": {} - }, - { - "label": "ConversationItemCreateEvent", - "importPath": "openai.types.beta.realtime.conversation_item_create_event", - "description": "openai.types.beta.realtime.conversation_item_create_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item_create_event", - "documentation": {} - }, - { - "label": "ConversationItemRetrieveEvent", - "importPath": "openai.types.beta.realtime.conversation_item_retrieve_event", - "description": "openai.types.beta.realtime.conversation_item_retrieve_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item_retrieve_event", - "documentation": {} - }, - { - "label": "ConversationItemTruncateEvent", - "importPath": "openai.types.beta.realtime.conversation_item_truncate_event", - "description": "openai.types.beta.realtime.conversation_item_truncate_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item_truncate_event", - "documentation": {} - }, - { - "label": "ConversationItemTruncateEvent", - "importPath": "openai.types.beta.realtime.conversation_item_truncate_event", - "description": "openai.types.beta.realtime.conversation_item_truncate_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.conversation_item_truncate_event", - "documentation": {} - }, - { - "label": "InputAudioBufferAppendEvent", - "importPath": "openai.types.beta.realtime.input_audio_buffer_append_event", - "description": "openai.types.beta.realtime.input_audio_buffer_append_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.input_audio_buffer_append_event", - "documentation": {} - }, - { - "label": "InputAudioBufferAppendEvent", - "importPath": "openai.types.beta.realtime.input_audio_buffer_append_event", - "description": "openai.types.beta.realtime.input_audio_buffer_append_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.input_audio_buffer_append_event", - "documentation": {} - }, - { - "label": "InputAudioBufferCommitEvent", - "importPath": "openai.types.beta.realtime.input_audio_buffer_commit_event", - "description": "openai.types.beta.realtime.input_audio_buffer_commit_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.input_audio_buffer_commit_event", - "documentation": {} - }, - { - "label": "RealtimeClientEvent", - "importPath": "openai.types.beta.realtime.realtime_client_event", - "description": "openai.types.beta.realtime.realtime_client_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.realtime_client_event", - "documentation": {} - }, - { - "label": "RealtimeServerEvent", - "importPath": "openai.types.beta.realtime.realtime_server_event", - "description": "openai.types.beta.realtime.realtime_server_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.realtime_server_event", - "documentation": {} - }, - { - "label": "ResponseAudioDeltaEvent", - "importPath": "openai.types.beta.realtime.response_audio_delta_event", - "description": "openai.types.beta.realtime.response_audio_delta_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.response_audio_delta_event", - "documentation": {} - }, - { - "label": "ResponseCancelEvent", - "importPath": "openai.types.beta.realtime.response_cancel_event", - "description": "openai.types.beta.realtime.response_cancel_event", - "isExtraImport": true, - "detail": 
"openai.types.beta.realtime.response_cancel_event", - "documentation": {} - }, - { - "label": "ResponseCreateEvent", - "importPath": "openai.types.beta.realtime.response_create_event", - "description": "openai.types.beta.realtime.response_create_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.response_create_event", - "documentation": {} - }, - { - "label": "Session", - "importPath": "openai.types.beta.realtime.session_update_event", - "description": "openai.types.beta.realtime.session_update_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.session_update_event", - "documentation": {} - }, - { - "label": "SessionTool", - "importPath": "openai.types.beta.realtime.session_update_event", - "description": "openai.types.beta.realtime.session_update_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.session_update_event", - "documentation": {} - }, - { - "label": "SessionTracing", - "importPath": "openai.types.beta.realtime.session_update_event", - "description": "openai.types.beta.realtime.session_update_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.session_update_event", - "documentation": {} - }, - { - "label": "SessionTracingTracingConfiguration", - "importPath": "openai.types.beta.realtime.session_update_event", - "description": "openai.types.beta.realtime.session_update_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.session_update_event", - "documentation": {} - }, - { - "label": "SessionUpdateEvent", - "importPath": "openai.types.beta.realtime.session_update_event", - "description": "openai.types.beta.realtime.session_update_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.session_update_event", - "documentation": {} - }, - { - "label": "SessionTracingTracingConfiguration", - "importPath": "openai.types.beta.realtime.session_update_event", - "description": "openai.types.beta.realtime.session_update_event", - "isExtraImport": true, - "detail": "openai.types.beta.realtime.session_update_event", - "documentation": {} - }, - { - "label": "ClientConnection", - "importPath": "websockets.asyncio.client", - "description": "websockets.asyncio.client", - "isExtraImport": true, - "detail": "websockets.asyncio.client", - "documentation": {} - }, - { - "label": "MaybeAwaitable", - "importPath": "agents.util._types", - "description": "agents.util._types", - "isExtraImport": true, - "detail": "agents.util._types", - "documentation": {} - }, - { - "label": "MaybeAwaitable", - "importPath": "agents.util._types", - "description": "agents.util._types", - "isExtraImport": true, - "detail": "agents.util._types", - "documentation": {} - }, - { - "label": "logging", - "kind": 6, - "isExtraImport": true, - "importPath": "logging", - "description": "logging", - "detail": "logging", - "documentation": {} - }, - { - "label": "queue", - "kind": 6, - "isExtraImport": true, - "importPath": "queue", - "description": "queue", - "detail": "queue", - "documentation": {} - }, - { - "label": "contextvars", - "kind": 6, - "isExtraImport": true, - "importPath": "contextvars", - "description": "contextvars", - "detail": "contextvars", - "documentation": {} - }, - { - "label": "re", - "kind": 6, - "isExtraImport": true, - "importPath": "re", - "description": "re", - "detail": "re", - "documentation": {} - }, - { - "label": "io", - "kind": 6, - "isExtraImport": true, - "importPath": "io", - "description": "io", - "detail": "io", - "documentation": {} - }, - { - "label": "wave", - 
"kind": 6, - "isExtraImport": true, - "importPath": "wave", - "description": "wave", - "detail": "wave", - "documentation": {} - }, - { - "label": "ResponseCodeInterpreterToolCall", - "importPath": "openai.types.responses.response_code_interpreter_tool_call", - "description": "openai.types.responses.response_code_interpreter_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_code_interpreter_tool_call", - "documentation": {} - }, - { - "label": "ResponseCodeInterpreterToolCall", - "importPath": "openai.types.responses.response_code_interpreter_tool_call", - "description": "openai.types.responses.response_code_interpreter_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_code_interpreter_tool_call", - "documentation": {} - }, - { - "label": "ActionClick", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionDoubleClick", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionDrag", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionKeypress", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionMove", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionScreenshot", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionScroll", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionType", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionWait", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "PendingSafetyCheck", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": 
"openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionClick", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionDoubleClick", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionDrag", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionDragPath", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionKeypress", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionMove", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionScreenshot", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionScroll", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionType", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionWait", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "PendingSafetyCheck", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": 
"openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionScreenshot", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ResponseComputerToolCall", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ActionClick", - "importPath": "openai.types.responses.response_computer_tool_call", - "description": "openai.types.responses.response_computer_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call", - "documentation": {} - }, - { - "label": "ComputerCallOutputAcknowledgedSafetyCheck", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "ComputerCallOutput", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "FunctionCallOutput", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "LocalShellCallOutput", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "McpApprovalResponse", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "FunctionCallOutput", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "FunctionCallOutput", - "importPath": "openai.types.responses.response_input_item_param", - "description": "openai.types.responses.response_input_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_input_item_param", - "documentation": {} - }, - { - "label": "ImageGenerationCall", - "importPath": "openai.types.responses.response_output_item", - "description": 
"openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "LocalShellCall", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpApprovalRequest", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpCall", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpListTools", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "ImageGenerationCall", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "LocalShellCall", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpApprovalRequest", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpCall", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpListTools", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "LocalShellCall", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "McpApprovalRequest", - "importPath": "openai.types.responses.response_output_item", - "description": "openai.types.responses.response_output_item", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_item", - "documentation": {} - }, - { - "label": "Docstring", - "importPath": "griffe", - "description": "griffe", - "isExtraImport": true, - "detail": "griffe", - "documentation": {} - }, - { - "label": "DocstringSectionKind", - "importPath": "griffe", - "description": "griffe", - "isExtraImport": true, - "detail": "griffe", - "documentation": {} - }, - { - "label": "FieldInfo", - "importPath": "pydantic.fields", - "description": 
"pydantic.fields", - "isExtraImport": true, - "detail": "pydantic.fields", - "documentation": {} - }, - { - "label": "copy", - "kind": 6, - "isExtraImport": true, - "importPath": "copy", - "description": "copy", - "detail": "copy", - "documentation": {} - }, - { - "label": "Body", - "importPath": "openai._types", - "description": "openai._types", - "isExtraImport": true, - "detail": "openai._types", - "documentation": {} - }, - { - "label": "Query", - "importPath": "openai._types", - "description": "openai._types", - "isExtraImport": true, - "detail": "openai._types", - "documentation": {} - }, - { - "label": "Reasoning", - "importPath": "openai.types.shared", - "description": "openai.types.shared", - "isExtraImport": true, - "detail": "openai.types.shared", - "documentation": {} - }, - { - "label": "Reasoning", - "importPath": "openai.types.shared", - "description": "openai.types.shared", - "isExtraImport": true, - "detail": "openai.types.shared", - "documentation": {} - }, - { - "label": "core_schema", - "importPath": "pydantic_core", - "description": "pydantic_core", - "isExtraImport": true, - "detail": "pydantic_core", - "documentation": {} - }, - { - "label": "to_json", - "importPath": "pydantic_core", - "description": "pydantic_core", - "isExtraImport": true, - "detail": "pydantic_core", - "documentation": {} - }, - { - "label": "ResponseTextDeltaEvent", - "importPath": "openai.types.responses.response_text_delta_event", - "description": "openai.types.responses.response_text_delta_event", - "isExtraImport": true, - "detail": "openai.types.responses.response_text_delta_event", - "documentation": {} - }, - { - "label": "ResponseTextDeltaEvent", - "importPath": "openai.types.responses.response_text_delta_event", - "description": "openai.types.responses.response_text_delta_event", - "isExtraImport": true, - "detail": "openai.types.responses.response_text_delta_event", - "documentation": {} - }, - { - "label": "Filters", - "importPath": "openai.types.responses.file_search_tool_param", - "description": "openai.types.responses.file_search_tool_param", - "isExtraImport": true, - "detail": "openai.types.responses.file_search_tool_param", - "documentation": {} - }, - { - "label": "RankingOptions", - "importPath": "openai.types.responses.file_search_tool_param", - "description": "openai.types.responses.file_search_tool_param", - "isExtraImport": true, - "detail": "openai.types.responses.file_search_tool_param", - "documentation": {} - }, - { - "label": "CodeInterpreter", - "importPath": "openai.types.responses.tool_param", - "description": "openai.types.responses.tool_param", - "isExtraImport": true, - "detail": "openai.types.responses.tool_param", - "documentation": {} - }, - { - "label": "ImageGeneration", - "importPath": "openai.types.responses.tool_param", - "description": "openai.types.responses.tool_param", - "isExtraImport": true, - "detail": "openai.types.responses.tool_param", - "documentation": {} - }, - { - "label": "Mcp", - "importPath": "openai.types.responses.tool_param", - "description": "openai.types.responses.tool_param", - "isExtraImport": true, - "detail": "openai.types.responses.tool_param", - "documentation": {} - }, - { - "label": "UserLocation", - "importPath": "openai.types.responses.web_search_tool_param", - "description": "openai.types.responses.web_search_tool_param", - "isExtraImport": true, - "detail": "openai.types.responses.web_search_tool_param", - "documentation": {} - }, - { - "label": "importlib.metadata", - "kind": 6, - "isExtraImport": true, - "importPath": 
"importlib.metadata", - "description": "importlib.metadata", - "detail": "importlib.metadata", - "documentation": {} - }, - { - "label": "FastAPI", - "importPath": "fastapi", - "description": "fastapi", - "isExtraImport": true, - "detail": "fastapi", - "documentation": {} - }, - { - "label": "StreamingResponse", - "importPath": "starlette.responses", - "description": "starlette.responses", - "isExtraImport": true, - "detail": "starlette.responses", - "documentation": {} - }, - { - "label": "pytest", - "kind": 6, - "isExtraImport": true, - "importPath": "pytest", - "description": "pytest", - "detail": "pytest", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "snapshot", - "importPath": "inline_snapshot", - "description": "inline_snapshot", - "isExtraImport": true, - "detail": "inline_snapshot", - "documentation": {} - }, - { - "label": "_MCPServerWithClientSession", - "importPath": "agents.mcp.server", - "description": "agents.mcp.server", - "isExtraImport": true, - "detail": "agents.mcp.server", - "documentation": {} - }, - { - "label": "_MCPServerWithClientSession", - "importPath": "agents.mcp.server", - "description": "agents.mcp.server", - "isExtraImport": true, - "detail": "agents.mcp.server", - "documentation": {} - }, - { - "label": "ToolFilter", - "importPath": "agents.mcp.util", - "description": "agents.mcp.util", - "isExtraImport": true, - "detail": "agents.mcp.util", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": 
"unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "Mock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "Mock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "Mock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "Mock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "AsyncMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "Mock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "MagicMock", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "patch", - "importPath": "unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "Mock", - "importPath": 
"unittest.mock", - "description": "unittest.mock", - "isExtraImport": true, - "detail": "unittest.mock", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents.run_context", - "description": "agents.run_context", - "isExtraImport": true, - "detail": "agents.run_context", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents.run_context", - "description": "agents.run_context", - "isExtraImport": true, - "detail": "agents.run_context", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents.run_context", - "description": "agents.run_context", - "isExtraImport": true, - "detail": "agents.run_context", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents.run_context", - "description": "agents.run_context", - "isExtraImport": true, - "detail": "agents.run_context", - "documentation": {} - }, - { - "label": "TContext", - "importPath": "agents.run_context", - "description": "agents.run_context", - "isExtraImport": true, - "detail": "agents.run_context", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "importPath": "agents.run_context", - "description": "agents.run_context", - "isExtraImport": true, - "detail": "agents.run_context", - "documentation": {} - }, - { - "label": "litellm", - "kind": 6, - "isExtraImport": true, - "importPath": "litellm", - "description": "litellm", - "detail": "litellm", - "documentation": {} - }, - { - "label": "Choices", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "Message", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "Choices", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "Message", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "ModelResponse", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "litellm.types.utils", - "description": "litellm.types.utils", - "isExtraImport": true, - "detail": "litellm.types.utils", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents.models.openai_chatcompletions", - "description": "agents.models.openai_chatcompletions", - "isExtraImport": true, - "detail": "agents.models.openai_chatcompletions", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents.models.openai_chatcompletions", - "description": "agents.models.openai_chatcompletions", - "isExtraImport": true, - "detail": "agents.models.openai_chatcompletions", - "documentation": {} - }, 
- { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents.models.openai_chatcompletions", - "description": "agents.models.openai_chatcompletions", - "isExtraImport": true, - "detail": "agents.models.openai_chatcompletions", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents.models.openai_chatcompletions", - "description": "agents.models.openai_chatcompletions", - "isExtraImport": true, - "detail": "agents.models.openai_chatcompletions", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "importPath": "agents.models.openai_chatcompletions", - "description": "agents.models.openai_chatcompletions", - "isExtraImport": true, - "detail": "agents.models.openai_chatcompletions", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDelta", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDeltaToolCall", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDeltaToolCallFunction", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChatCompletionChunk", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDelta", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDeltaToolCall", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDeltaToolCallFunction", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": 
"openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "Choice", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "ChoiceDelta", - "importPath": "openai.types.chat.chat_completion_chunk", - "description": "openai.types.chat.chat_completion_chunk", - "isExtraImport": true, - "detail": "openai.types.chat.chat_completion_chunk", - "documentation": {} - }, - { - "label": "LitellmProvider", - "importPath": "agents.extensions.models.litellm_provider", - "description": "agents.extensions.models.litellm_provider", - "isExtraImport": true, - "detail": "agents.extensions.models.litellm_provider", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "set_default_agent_runner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "Runner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "RunConfig", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "set_default_agent_runner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "importPath": "agents.run", - "description": "agents.run", - "isExtraImport": true, - "detail": "agents.run", - "documentation": {} - }, - { - "label": "RealtimeAgent", - "importPath": "agents.realtime.agent", - "description": "agents.realtime.agent", - "isExtraImport": true, - "detail": "agents.realtime.agent", - "documentation": {} - }, - { - "label": "RealtimeAgent", - "importPath": "agents.realtime.agent", - "description": "agents.realtime.agent", - "isExtraImport": true, - "detail": "agents.realtime.agent", - "documentation": {} - }, - { - "label": "RealtimeAgent", - "importPath": "agents.realtime.agent", - "description": "agents.realtime.agent", - "isExtraImport": true, - "detail": "agents.realtime.agent", - "documentation": {} - }, - { - "label": "RealtimeModelTracingConfig", - "importPath": "agents.realtime.config", - "description": "agents.realtime.config", - 
"isExtraImport": true, - "detail": "agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeRunConfig", - "importPath": "agents.realtime.config", - "description": "agents.realtime.config", - "isExtraImport": true, - "detail": "agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeSessionModelSettings", - "importPath": "agents.realtime.config", - "description": "agents.realtime.config", - "isExtraImport": true, - "detail": "agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeRunConfig", - "importPath": "agents.realtime.config", - "description": "agents.realtime.config", - "isExtraImport": true, - "detail": "agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeModelSendAudio", - "importPath": "agents.realtime.model_inputs", - "description": "agents.realtime.model_inputs", - "isExtraImport": true, - "detail": "agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendRawMessage", - "importPath": "agents.realtime.model_inputs", - "description": "agents.realtime.model_inputs", - "isExtraImport": true, - "detail": "agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendToolOutput", - "importPath": "agents.realtime.model_inputs", - "description": "agents.realtime.model_inputs", - "isExtraImport": true, - "detail": "agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendUserInput", - "importPath": "agents.realtime.model_inputs", - "description": "agents.realtime.model_inputs", - "isExtraImport": true, - "detail": "agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelUserInputMessage", - "importPath": "agents.realtime.model_inputs", - "description": "agents.realtime.model_inputs", - "isExtraImport": true, - "detail": "agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "_ConversionHelper", - "importPath": "agents.realtime.openai_realtime", - "description": "agents.realtime.openai_realtime", - "isExtraImport": true, - "detail": "agents.realtime.openai_realtime", - "documentation": {} - }, - { - "label": "_ConversionHelper", - "importPath": "agents.realtime.openai_realtime", - "description": "agents.realtime.openai_realtime", - "isExtraImport": true, - "detail": "agents.realtime.openai_realtime", - "documentation": {} - }, - { - "label": "OpenAIRealtimeWebSocketModel", - "importPath": "agents.realtime.openai_realtime", - "description": "agents.realtime.openai_realtime", - "isExtraImport": true, - "detail": "agents.realtime.openai_realtime", - "documentation": {} - }, - { - "label": "OpenAIRealtimeWebSocketModel", - "importPath": "agents.realtime.openai_realtime", - "description": "agents.realtime.openai_realtime", - "isExtraImport": true, - "detail": "agents.realtime.openai_realtime", - "documentation": {} - }, - { - "label": "AssistantMessageItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeMessageItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "SystemMessageItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": 
"UserMessageItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "AssistantMessageItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "AssistantText", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "InputAudio", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "InputText", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "UserMessageItem", - "importPath": "agents.realtime.items", - "description": "agents.realtime.items", - "isExtraImport": true, - "detail": "agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeModelEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelErrorEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelToolCallEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioDoneEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioInterruptedEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelConnectionStatusEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelErrorEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, 
- { - "label": "RealtimeModelInputAudioTranscriptionCompletedEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelItemDeletedEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelItemUpdatedEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelOtherEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelToolCallEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelTranscriptDeltaEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelTurnEndedEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelTurnStartedEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelErrorEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelExceptionEvent", - "importPath": "agents.realtime.model_events", - "description": "agents.realtime.model_events", - "isExtraImport": true, - "detail": "agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModel", - "importPath": "agents.realtime.model", - "description": "agents.realtime.model", - "isExtraImport": true, - "detail": "agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModelConfig", - "importPath": "agents.realtime.model", - "description": "agents.realtime.model", - "isExtraImport": true, - "detail": "agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModel", - "importPath": "agents.realtime.model", - "description": "agents.realtime.model", - "isExtraImport": true, - "detail": "agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModel", - "importPath": "agents.realtime.model", - "description": "agents.realtime.model", - "isExtraImport": true, - "detail": "agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModelConfig", - "importPath": "agents.realtime.model", - 
"description": "agents.realtime.model", - "isExtraImport": true, - "detail": "agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModelListener", - "importPath": "agents.realtime.model", - "description": "agents.realtime.model", - "isExtraImport": true, - "detail": "agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeRunner", - "importPath": "agents.realtime.runner", - "description": "agents.realtime.runner", - "isExtraImport": true, - "detail": "agents.realtime.runner", - "documentation": {} - }, - { - "label": "RealtimeSession", - "importPath": "agents.realtime.session", - "description": "agents.realtime.session", - "isExtraImport": true, - "detail": "agents.realtime.session", - "documentation": {} - }, - { - "label": "RealtimeSession", - "importPath": "agents.realtime.session", - "description": "agents.realtime.session", - "isExtraImport": true, - "detail": "agents.realtime.session", - "documentation": {} - }, - { - "label": "RealtimeSession", - "importPath": "agents.realtime.session", - "description": "agents.realtime.session", - "isExtraImport": true, - "detail": "agents.realtime.session", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "importPath": "agents.guardrail", - "description": "agents.guardrail", - "isExtraImport": true, - "detail": "agents.guardrail", - "documentation": {} - }, - { - "label": "OutputGuardrail", - "importPath": "agents.guardrail", - "description": "agents.guardrail", - "isExtraImport": true, - "detail": "agents.guardrail", - "documentation": {} - }, - { - "label": "input_guardrail", - "importPath": "agents.guardrail", - "description": "agents.guardrail", - "isExtraImport": true, - "detail": "agents.guardrail", - "documentation": {} - }, - { - "label": "output_guardrail", - "importPath": "agents.guardrail", - "description": "agents.guardrail", - "isExtraImport": true, - "detail": "agents.guardrail", - "documentation": {} - }, - { - "label": "RealtimeAgentEndEvent", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAgentStartEvent", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAudio", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAudioEnd", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAudioInterrupted", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeError", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeGuardrailTripped", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeHistoryAdded", - "importPath": "agents.realtime.events", - "description": 
"agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeHistoryUpdated", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeRawModelEvent", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeToolEnd", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeToolStart", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeError", - "importPath": "agents.realtime.events", - "description": "agents.realtime.events", - "isExtraImport": true, - "detail": "agents.realtime.events", - "documentation": {} - }, - { - "label": "ToolContext", - "importPath": "agents.tool_context", - "description": "agents.tool_context", - "isExtraImport": true, - "detail": "agents.tool_context", - "documentation": {} - }, - { - "label": "ToolContext", - "importPath": "agents.tool_context", - "description": "agents.tool_context", - "isExtraImport": true, - "detail": "agents.tool_context", - "documentation": {} - }, - { - "label": "ToolContext", - "importPath": "agents.tool_context", - "description": "agents.tool_context", - "isExtraImport": true, - "detail": "agents.tool_context", - "documentation": {} - }, - { - "label": "ToolContext", - "importPath": "agents.tool_context", - "description": "agents.tool_context", - "isExtraImport": true, - "detail": "agents.tool_context", - "documentation": {} - }, - { - "label": "BackendSpanExporter", - "importPath": "agents.tracing.processors", - "description": "agents.tracing.processors", - "isExtraImport": true, - "detail": "agents.tracing.processors", - "documentation": {} - }, - { - "label": "BackendSpanExporter", - "importPath": "agents.tracing.processors", - "description": "agents.tracing.processors", - "isExtraImport": true, - "detail": "agents.tracing.processors", - "documentation": {} - }, - { - "label": "BatchTraceProcessor", - "importPath": "agents.tracing.processors", - "description": "agents.tracing.processors", - "isExtraImport": true, - "detail": "agents.tracing.processors", - "documentation": {} - }, - { - "label": "SimpleNamespace", - "importPath": "types", - "description": "types", - "isExtraImport": true, - "detail": "types", - "documentation": {} - }, - { - "label": "AgentOutputSchemaBase", - "importPath": "agents.agent_output", - "description": "agents.agent_output", - "isExtraImport": true, - "detail": "agents.agent_output", - "documentation": {} - }, - { - "label": "AgentOutputSchemaBase", - "importPath": "agents.agent_output", - "description": "agents.agent_output", - "isExtraImport": true, - "detail": "agents.agent_output", - "documentation": {} - }, - { - "label": "AgentOutputSchema", - "importPath": "agents.agent_output", - "description": "agents.agent_output", - "isExtraImport": true, - "detail": "agents.agent_output", - "documentation": {} - }, - { - "label": "_WRAPPER_DICT_KEY", - "importPath": "agents.agent_output", - "description": "agents.agent_output", - "isExtraImport": true, - "detail": 
"agents.agent_output", - "documentation": {} - }, - { - "label": "_WRAPPER_DICT_KEY", - "importPath": "agents.agent_output", - "description": "agents.agent_output", - "isExtraImport": true, - "detail": "agents.agent_output", - "documentation": {} - }, - { - "label": "_openai_shared", - "importPath": "agents.models", - "description": "agents.models", - "isExtraImport": true, - "detail": "agents.models", - "documentation": {} - }, - { - "label": "OpenAIResponsesModel", - "importPath": "agents.models.openai_responses", - "description": "agents.models.openai_responses", - "isExtraImport": true, - "detail": "agents.models.openai_responses", - "documentation": {} - }, - { - "label": "OpenAIResponsesModel", - "importPath": "agents.models.openai_responses", - "description": "agents.models.openai_responses", - "isExtraImport": true, - "detail": "agents.models.openai_responses", - "documentation": {} - }, - { - "label": "Converter", - "importPath": "agents.models.openai_responses", - "description": "agents.models.openai_responses", - "isExtraImport": true, - "detail": "agents.models.openai_responses", - "documentation": {} - }, - { - "label": "set_trace_processors", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "SpanError", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "generation_span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "Span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "Trace", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "agent_span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "custom_span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "function_span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "generation_span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "handoff_span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "trace", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "Span", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "Trace", - "importPath": "agents.tracing", - "description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "TracingProcessor", - "importPath": "agents.tracing", - 
"description": "agents.tracing", - "isExtraImport": true, - "detail": "agents.tracing", - "documentation": {} - }, - { - "label": "get_trace_provider", - "importPath": "agents.tracing.setup", - "description": "agents.tracing.setup", - "isExtraImport": true, - "detail": "agents.tracing.setup", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "agents.usage", - "description": "agents.usage", - "isExtraImport": true, - "detail": "agents.usage", - "documentation": {} - }, - { - "label": "Usage", - "importPath": "agents.usage", - "description": "agents.usage", - "isExtraImport": true, - "detail": "agents.usage", - "documentation": {} - }, - { - "label": "defaultdict", - "importPath": "collections", - "description": "collections", - "isExtraImport": true, - "detail": "collections", - "documentation": {} - }, - { - "label": "defaultdict", - "importPath": "collections", - "description": "collections", - "isExtraImport": true, - "detail": "collections", - "documentation": {} - }, - { - "label": "Agent", - "importPath": "agents.agent", - "description": "agents.agent", - "isExtraImport": true, - "detail": "agents.agent", - "documentation": {} - }, - { - "label": "Action", - "importPath": "agents.agent", - "description": "agents.agent", - "isExtraImport": true, - "detail": "agents.agent", - "documentation": {} - }, - { - "label": "ToolsToFinalOutputResult", - "importPath": "agents.agent", - "description": "agents.agent", - "isExtraImport": true, - "detail": "agents.agent", - "documentation": {} - }, - { - "label": "AgentHooks", - "importPath": "agents.lifecycle", - "description": "agents.lifecycle", - "isExtraImport": true, - "detail": "agents.lifecycle", - "documentation": {} - }, - { - "label": "AgentUpdatedStreamEvent", - "importPath": "agents.stream_events", - "description": "agents.stream_events", - "isExtraImport": true, - "detail": "agents.stream_events", - "documentation": {} - }, - { - "label": "ComputerAction", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "RunImpl", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "NextStepFinalOutput", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "NextStepHandoff", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "NextStepRunAgain", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "RunImpl", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "SingleStepResult", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "RunImpl", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "AgentToolUseTracker", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - 
"isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "RunImpl", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "RunImpl", - "importPath": "agents._run_impl", - "description": "agents._run_impl", - "isExtraImport": true, - "detail": "agents._run_impl", - "documentation": {} - }, - { - "label": "generate_func_documentation", - "importPath": "agents.function_schema", - "description": "agents.function_schema", - "isExtraImport": true, - "detail": "agents.function_schema", - "documentation": {} - }, - { - "label": "function_schema", - "importPath": "agents.function_schema", - "description": "agents.function_schema", - "isExtraImport": true, - "detail": "agents.function_schema", - "documentation": {} - }, - { - "label": "remove_all_tools", - "importPath": "agents.extensions.handoff_filters", - "description": "agents.extensions.handoff_filters", - "isExtraImport": true, - "detail": "agents.extensions.handoff_filters", - "documentation": {} - }, - { - "label": "ResponseComputerToolCallParam", - "importPath": "openai.types.responses.response_computer_tool_call_param", - "description": "openai.types.responses.response_computer_tool_call_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_computer_tool_call_param", - "documentation": {} - }, - { - "label": "ResponseFileSearchToolCall", - "importPath": "openai.types.responses.response_file_search_tool_call", - "description": "openai.types.responses.response_file_search_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_file_search_tool_call", - "documentation": {} - }, - { - "label": "ResponseFileSearchToolCallParam", - "importPath": "openai.types.responses.response_file_search_tool_call_param", - "description": "openai.types.responses.response_file_search_tool_call_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_file_search_tool_call_param", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCall", - "importPath": "openai.types.responses.response_function_tool_call", - "description": "openai.types.responses.response_function_tool_call", - "isExtraImport": true, - "detail": "openai.types.responses.response_function_tool_call", - "documentation": {} - }, - { - "label": "ResponseFunctionToolCallParam", - "importPath": "openai.types.responses.response_function_tool_call_param", - "description": "openai.types.responses.response_function_tool_call_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_function_tool_call_param", - "documentation": {} - }, - { - "label": "ActionSearch", - "importPath": "openai.types.responses.response_function_web_search", - "description": "openai.types.responses.response_function_web_search", - "isExtraImport": true, - "detail": "openai.types.responses.response_function_web_search", - "documentation": {} - }, - { - "label": "ResponseFunctionWebSearch", - "importPath": "openai.types.responses.response_function_web_search", - "description": "openai.types.responses.response_function_web_search", - "isExtraImport": true, - "detail": "openai.types.responses.response_function_web_search", - "documentation": {} - }, - { - "label": "ActionSearch", - "importPath": "openai.types.responses.response_function_web_search", - "description": "openai.types.responses.response_function_web_search", - "isExtraImport": true, - "detail": 
"openai.types.responses.response_function_web_search", - "documentation": {} - }, - { - "label": "ResponseFunctionWebSearchParam", - "importPath": "openai.types.responses.response_function_web_search_param", - "description": "openai.types.responses.response_function_web_search_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_function_web_search_param", - "documentation": {} - }, - { - "label": "ResponseOutputMessage", - "importPath": "openai.types.responses.response_output_message", - "description": "openai.types.responses.response_output_message", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_message", - "documentation": {} - }, - { - "label": "ResponseOutputMessageParam", - "importPath": "openai.types.responses.response_output_message_param", - "description": "openai.types.responses.response_output_message_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_message_param", - "documentation": {} - }, - { - "label": "ResponseOutputRefusal", - "importPath": "openai.types.responses.response_output_refusal", - "description": "openai.types.responses.response_output_refusal", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_refusal", - "documentation": {} - }, - { - "label": "ResponseOutputText", - "importPath": "openai.types.responses.response_output_text", - "description": "openai.types.responses.response_output_text", - "isExtraImport": true, - "detail": "openai.types.responses.response_output_text", - "documentation": {} - }, - { - "label": "ResponseReasoningItemParam", - "importPath": "openai.types.responses.response_reasoning_item_param", - "description": "openai.types.responses.response_reasoning_item_param", - "isExtraImport": true, - "detail": "openai.types.responses.response_reasoning_item_param", - "documentation": {} - }, - { - "label": "ChatCmplHelpers", - "importPath": "agents.models.chatcmpl_helpers", - "description": "agents.models.chatcmpl_helpers", - "isExtraImport": true, - "detail": "agents.models.chatcmpl_helpers", - "documentation": {} - }, - { - "label": "FAKE_RESPONSES_ID", - "importPath": "agents.models.fake_id", - "description": "agents.models.fake_id", - "isExtraImport": true, - "detail": "agents.models.fake_id", - "documentation": {} - }, - { - "label": "FAKE_RESPONSES_ID", - "importPath": "agents.models.fake_id", - "description": "agents.models.fake_id", - "isExtraImport": true, - "detail": "agents.models.fake_id", - "documentation": {} - }, - { - "label": "Converter", - "importPath": "agents.models.chatcmpl_converter", - "description": "agents.models.chatcmpl_converter", - "isExtraImport": true, - "detail": "agents.models.chatcmpl_converter", - "documentation": {} - }, - { - "label": "Converter", - "importPath": "agents.models.chatcmpl_converter", - "description": "agents.models.chatcmpl_converter", - "isExtraImport": true, - "detail": "agents.models.chatcmpl_converter", - "documentation": {} - }, - { - "label": "_json", - "importPath": "agents.util", - "description": "agents.util", - "isExtraImport": true, - "detail": "agents.util", - "documentation": {} - }, - { - "label": "pretty_print_result", - "importPath": "agents.util._pretty_print", - "description": "agents.util._pretty_print", - "isExtraImport": true, - "detail": "agents.util._pretty_print", - "documentation": {} - }, - { - "label": "pretty_print_run_result_streaming", - "importPath": "agents.util._pretty_print", - "description": "agents.util._pretty_print", - "isExtraImport": true, - 
"detail": "agents.util._pretty_print", - "documentation": {} - }, - { - "label": "FakeModel", - "importPath": "tests.fake_model", - "description": "tests.fake_model", - "isExtraImport": true, - "detail": "tests.fake_model", - "documentation": {} - }, - { - "label": "ResponseSpanData", - "importPath": "agents.tracing.span_data", - "description": "agents.tracing.span_data", - "isExtraImport": true, - "detail": "agents.tracing.span_data", - "documentation": {} - }, - { - "label": "AgentSpanData", - "importPath": "agents.tracing.span_data", - "description": "agents.tracing.span_data", - "isExtraImport": true, - "detail": "agents.tracing.span_data", - "documentation": {} - }, - { - "label": "fake_model", - "importPath": "tests", - "description": "tests", - "isExtraImport": true, - "detail": "tests", - "documentation": {} - }, - { - "label": "mock", - "importPath": "unittest", - "description": "unittest", - "isExtraImport": true, - "detail": "unittest", - "documentation": {} - }, - { - "label": "websockets.exceptions", - "kind": 6, - "isExtraImport": true, - "importPath": "websockets.exceptions", - "description": "websockets.exceptions", - "detail": "websockets.exceptions", - "documentation": {} - }, - { - "label": "ensure_strict_json_schema", - "importPath": "agents.strict_schema", - "description": "agents.strict_schema", - "isExtraImport": true, - "detail": "agents.strict_schema", - "documentation": {} - }, - { - "label": "TracingProcessor", - "importPath": "agents.tracing.processor_interface", - "description": "agents.tracing.processor_interface", - "isExtraImport": true, - "detail": "agents.tracing.processor_interface", - "documentation": {} - }, - { - "label": "SpanImpl", - "importPath": "agents.tracing.spans", - "description": "agents.tracing.spans", - "isExtraImport": true, - "detail": "agents.tracing.spans", - "documentation": {} - }, - { - "label": "SpanError", - "importPath": "agents.tracing.spans", - "description": "agents.tracing.spans", - "isExtraImport": true, - "detail": "agents.tracing.spans", - "documentation": {} - }, - { - "label": "TraceImpl", - "importPath": "agents.tracing.traces", - "description": "agents.tracing.traces", - "isExtraImport": true, - "detail": "agents.tracing.traces", - "documentation": {} - }, - { - "label": "draw_graph", - "importPath": "agents.extensions.visualization", - "description": "agents.extensions.visualization", - "isExtraImport": true, - "detail": "agents.extensions.visualization", - "documentation": {} - }, - { - "label": "get_all_edges", - "importPath": "agents.extensions.visualization", - "description": "agents.extensions.visualization", - "isExtraImport": true, - "detail": "agents.extensions.visualization", - "documentation": {} - }, - { - "label": "get_all_nodes", - "importPath": "agents.extensions.visualization", - "description": "agents.extensions.visualization", - "isExtraImport": true, - "detail": "agents.extensions.visualization", - "documentation": {} - }, - { - "label": "get_main_graph", - "importPath": "agents.extensions.visualization", - "description": "agents.extensions.visualization", - "isExtraImport": true, - "detail": "agents.extensions.visualization", - "documentation": {} - }, - { - "label": "bin_dir", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "bin_dir = os.path.dirname(abs_file)\nbase = bin_dir[: -len(\"bin\") - 1] # strip away the bin part from the __file__, plus the path separator\n# prepend bin to PATH (this file is inside the bin 
directory)\nos.environ[\"PATH\"] = os.pathsep.join([bin_dir, *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\nos.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "base", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "base = bin_dir[: -len(\"bin\") - 1] # strip away the bin part from the __file__, plus the path separator\n# prepend bin to PATH (this file is inside the bin directory)\nos.environ[\"PATH\"] = os.pathsep.join([bin_dir, *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\nos.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "os.environ[\"PATH\"]", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "os.environ[\"PATH\"] = os.pathsep.join([bin_dir, *os.environ.get(\"PATH\", \"\").split(os.pathsep)])\nos.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "os.environ[\"VIRTUAL_ENV\"]", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "os.environ[\"VIRTUAL_ENV\"] = base # virtual env is right above bin directory\nos.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "os.environ[\"VIRTUAL_ENV_PROMPT\"]", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "os.environ[\"VIRTUAL_ENV_PROMPT\"] = \"openai-agents-python\" or os.path.basename(base) # noqa: SIM222\n# add the virtual environments libraries to the host python import mechanism\nprev_length = len(sys.path)\nfor lib in 
\"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "prev_length", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "prev_length = len(sys.path)\nfor lib in \"../lib/python3.12/site-packages\".split(os.pathsep):\n path = os.path.realpath(os.path.join(bin_dir, lib))\n site.addsitedir(path)\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "sys.path[:]", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\nsys.real_prefix = sys.prefix\nsys.prefix = base", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "sys.real_prefix", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "sys.real_prefix = sys.prefix\nsys.prefix = base", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "sys.prefix", - "kind": 5, - "importPath": ".venv.bin.activate_this", - "description": ".venv.bin.activate_this", - "peekOfCode": "sys.prefix = base", - "detail": ".venv.bin.activate_this", - "documentation": {} - }, - { - "label": "built_instructions", - "kind": 2, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "def built_instructions(target_language: str, lang_code: str) -> str:\n do_not_translate_terms = \"\\n\".join(do_not_translate)\n specific_terms = \"\\n\".join(\n [f\"* {k} -> {v}\" for k, v in eng_to_non_eng_mapping.get(lang_code, {}).items()]\n )\n specific_instructions = \"\\n\".join(\n eng_to_non_eng_instructions.get(\"common\", [])\n + eng_to_non_eng_instructions.get(lang_code, [])\n )\n return f\"\"\"You are an expert technical translator.", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "translate_file", - "kind": 2, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "def translate_file(file_path: str, target_path: str, lang_code: str) -> None:\n print(f\"Translating {file_path} into a different language: {lang_code}\")\n with open(file_path, encoding=\"utf-8\") as f:\n content = f.read()\n # Split content into lines\n lines: list[str] = content.splitlines()\n chunks: list[str] = []\n current_chunk: list[str] = []\n # Split content into chunks of up to 120 lines, ensuring splits occur before section titles\n in_code_block = False", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "translate_single_source_file", - "kind": 2, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "def translate_single_source_file(file_path: str) -> None:\n relative_path = os.path.relpath(file_path, source_dir)\n if \"ref/\" in relative_path or not file_path.endswith(\".md\"):\n return\n for lang_code in languages:\n target_dir = os.path.join(source_dir, lang_code)\n target_path = os.path.join(target_dir, relative_path)\n # Ensure the target 
directory exists\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n # Translate and save the file", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "main", - "kind": 2, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "def main():\n parser = argparse.ArgumentParser(description=\"Translate documentation files\")\n parser.add_argument(\n \"--file\", type=str, help=\"Specific file to translate (relative to docs directory)\"\n )\n args = parser.parse_args()\n if args.file:\n # Translate a single file\n # Handle both \"foo.md\" and \"docs/foo.md\" formats\n if args.file.startswith(\"docs/\"):", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "OPENAI_MODEL", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "OPENAI_MODEL = os.environ.get(\"OPENAI_MODEL\", \"o3\")\nENABLE_CODE_SNIPPET_EXCLUSION = True\n# gpt-4.5 needed this for better quality\nENABLE_SMALL_CHUNK_TRANSLATION = False\nSEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "ENABLE_CODE_SNIPPET_EXCLUSION", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "ENABLE_CODE_SNIPPET_EXCLUSION = True\n# gpt-4.5 needed this for better quality\nENABLE_SMALL_CHUNK_TRANSLATION = False\nSEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories\nsource_dir = \"docs\"", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "ENABLE_SMALL_CHUNK_TRANSLATION", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "ENABLE_SMALL_CHUNK_TRANSLATION = False\nSEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories\nsource_dir = \"docs\"\nlanguages = {\n \"ja\": \"Japanese\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "SEARCH_EXCLUSION", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "SEARCH_EXCLUSION = \"\"\"---\nsearch:\n exclude: true\n---\n\"\"\"\n# Define the source and target directories\nsource_dir = \"docs\"\nlanguages = {\n \"ja\": \"Japanese\",\n # Add more languages here, e.g., \"fr\": \"French\"", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "source_dir", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "source_dir = \"docs\"\nlanguages = {\n \"ja\": \"Japanese\",\n # Add more languages here, e.g., \"fr\": \"French\"\n}\n# Initialize OpenAI client\nopenai_client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n# Define dictionaries for translation control\ndo_not_translate = [\n \"OpenAI\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "languages", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "languages = {\n \"ja\": \"Japanese\",\n # Add more languages here, e.g., \"fr\": \"French\"\n}\n# Initialize OpenAI client\nopenai_client = 
OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n# Define dictionaries for translation control\ndo_not_translate = [\n \"OpenAI\",\n \"Agents SDK\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "openai_client", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "openai_client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n# Define dictionaries for translation control\ndo_not_translate = [\n \"OpenAI\",\n \"Agents SDK\",\n \"Hello World\",\n \"Model context protocol\",\n \"MCP\",\n \"structured outputs\",\n \"Chain-of-Thought\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "do_not_translate", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "do_not_translate = [\n \"OpenAI\",\n \"Agents SDK\",\n \"Hello World\",\n \"Model context protocol\",\n \"MCP\",\n \"structured outputs\",\n \"Chain-of-Thought\",\n \"Chat Completions\",\n \"Computer-Using Agent\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "eng_to_non_eng_mapping", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "eng_to_non_eng_mapping = {\n \"ja\": {\n \"agents\": \"エージェント\",\n \"computer use\": \"コンピュータ操作\",\n \"OAI hosted tools\": \"OpenAI がホストするツール\",\n \"well formed data\": \"適切な形式のデータ\",\n \"guardrail\": \"ガードレール\",\n \"handoffs\": \"ハンドオフ\",\n \"function tools\": \"関数ツール\",\n \"tracing\": \"トレーシング\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "eng_to_non_eng_instructions", - "kind": 5, - "importPath": "docs.scripts.translate_docs", - "description": "docs.scripts.translate_docs", - "peekOfCode": "eng_to_non_eng_instructions = {\n \"common\": [\n \"* The term 'examples' must be code examples when the page mentions the code examples in the repo, it can be translated as either 'code examples' or 'sample code'.\",\n \"* The term 'primitives' can be translated as basic components.\",\n \"* When the terms 'instructions' and 'tools' are mentioned as API parameter names, they must be kept as is.\",\n \"* The terms 'temperature', 'top_p', 'max_tokens', 'presence_penalty', 'frequency_penalty' as parameter names must be kept as is.\",\n ],\n \"ja\": [\n \"* The term 'result' in the Runner guide context must be translated like 'execution results'\",\n \"* The term 'raw' in 'raw response events' must be kept as is\",", - "detail": "docs.scripts.translate_docs", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.agent_patterns.agents_as_tools", - "description": "examples.agent_patterns.agents_as_tools", - "peekOfCode": "spanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You translate the user's message to Spanish\",\n handoff_description=\"An english to spanish translator\",\n)\nfrench_agent = Agent(\n name=\"french_agent\",\n instructions=\"You translate the user's message to French\",\n handoff_description=\"An english to french translator\",\n)", - "detail": "examples.agent_patterns.agents_as_tools", - "documentation": {} - }, - { - "label": "french_agent", - "kind": 5, - "importPath": "examples.agent_patterns.agents_as_tools", - "description": "examples.agent_patterns.agents_as_tools", - "peekOfCode": "french_agent = Agent(\n name=\"french_agent\",\n instructions=\"You translate the user's 
message to French\",\n handoff_description=\"An english to french translator\",\n)\nitalian_agent = Agent(\n name=\"italian_agent\",\n instructions=\"You translate the user's message to Italian\",\n handoff_description=\"An english to italian translator\",\n)", - "detail": "examples.agent_patterns.agents_as_tools", - "documentation": {} - }, - { - "label": "italian_agent", - "kind": 5, - "importPath": "examples.agent_patterns.agents_as_tools", - "description": "examples.agent_patterns.agents_as_tools", - "peekOfCode": "italian_agent = Agent(\n name=\"italian_agent\",\n instructions=\"You translate the user's message to Italian\",\n handoff_description=\"An english to italian translator\",\n)\norchestrator_agent = Agent(\n name=\"orchestrator_agent\",\n instructions=(\n \"You are a translation agent. You use the tools given to you to translate.\"\n \"If asked for multiple translations, you call the relevant tools in order.\"", - "detail": "examples.agent_patterns.agents_as_tools", - "documentation": {} - }, - { - "label": "orchestrator_agent", - "kind": 5, - "importPath": "examples.agent_patterns.agents_as_tools", - "description": "examples.agent_patterns.agents_as_tools", - "peekOfCode": "orchestrator_agent = Agent(\n name=\"orchestrator_agent\",\n instructions=(\n \"You are a translation agent. You use the tools given to you to translate.\"\n \"If asked for multiple translations, you call the relevant tools in order.\"\n \"You never translate on your own, you always use the provided tools.\"\n ),\n tools=[\n spanish_agent.as_tool(\n tool_name=\"translate_to_spanish\",", - "detail": "examples.agent_patterns.agents_as_tools", - "documentation": {} - }, - { - "label": "synthesizer_agent", - "kind": 5, - "importPath": "examples.agent_patterns.agents_as_tools", - "description": "examples.agent_patterns.agents_as_tools", - "peekOfCode": "synthesizer_agent = Agent(\n name=\"synthesizer_agent\",\n instructions=\"You inspect translations, correct them if needed, and produce a final concatenated response.\",\n)\nasync def main():\n msg = input(\"Hi! What would you like translated, and to which languages? \")\n # Run the entire orchestration in a single trace\n with trace(\"Orchestrator evaluator\"):\n orchestrator_result = await Runner.run(orchestrator_agent, msg)\n for item in orchestrator_result.new_items:", - "detail": "examples.agent_patterns.agents_as_tools", - "documentation": {} - }, - { - "label": "OutlineCheckerOutput", - "kind": 6, - "importPath": "examples.agent_patterns.deterministic", - "description": "examples.agent_patterns.deterministic", - "peekOfCode": "class OutlineCheckerOutput(BaseModel):\n good_quality: bool\n is_scifi: bool\noutline_checker_agent = Agent(\n name=\"outline_checker_agent\",\n instructions=\"Read the given story outline, and judge the quality. 
Also, determine if it is a scifi story.\",\n output_type=OutlineCheckerOutput,\n)\nstory_agent = Agent(\n name=\"story_agent\",", - "detail": "examples.agent_patterns.deterministic", - "documentation": {} - }, - { - "label": "story_outline_agent", - "kind": 5, - "importPath": "examples.agent_patterns.deterministic", - "description": "examples.agent_patterns.deterministic", - "peekOfCode": "story_outline_agent = Agent(\n name=\"story_outline_agent\",\n instructions=\"Generate a very short story outline based on the user's input.\",\n)\nclass OutlineCheckerOutput(BaseModel):\n good_quality: bool\n is_scifi: bool\noutline_checker_agent = Agent(\n name=\"outline_checker_agent\",\n instructions=\"Read the given story outline, and judge the quality. Also, determine if it is a scifi story.\",", - "detail": "examples.agent_patterns.deterministic", - "documentation": {} - }, - { - "label": "outline_checker_agent", - "kind": 5, - "importPath": "examples.agent_patterns.deterministic", - "description": "examples.agent_patterns.deterministic", - "peekOfCode": "outline_checker_agent = Agent(\n name=\"outline_checker_agent\",\n instructions=\"Read the given story outline, and judge the quality. Also, determine if it is a scifi story.\",\n output_type=OutlineCheckerOutput,\n)\nstory_agent = Agent(\n name=\"story_agent\",\n instructions=\"Write a short story based on the given outline.\",\n output_type=str,\n)", - "detail": "examples.agent_patterns.deterministic", - "documentation": {} - }, - { - "label": "story_agent", - "kind": 5, - "importPath": "examples.agent_patterns.deterministic", - "description": "examples.agent_patterns.deterministic", - "peekOfCode": "story_agent = Agent(\n name=\"story_agent\",\n instructions=\"Write a short story based on the given outline.\",\n output_type=str,\n)\nasync def main():\n input_prompt = input(\"What kind of story do you want? \")\n # Ensure the entire workflow is a single trace\n with trace(\"Deterministic story flow\"):\n # 1. 
Generate an outline", - "detail": "examples.agent_patterns.deterministic", - "documentation": {} - }, - { - "label": "Weather", - "kind": 6, - "importPath": "examples.agent_patterns.forcing_tool_use", - "description": "examples.agent_patterns.forcing_tool_use", - "peekOfCode": "class Weather(BaseModel):\n city: str\n temperature_range: str\n conditions: str\n@function_tool\ndef get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind\")\nasync def custom_tool_use_behavior(\n context: RunContextWrapper[Any], results: list[FunctionToolResult]", - "detail": "examples.agent_patterns.forcing_tool_use", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.agent_patterns.forcing_tool_use", - "description": "examples.agent_patterns.forcing_tool_use", - "peekOfCode": "def get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind\")\nasync def custom_tool_use_behavior(\n context: RunContextWrapper[Any], results: list[FunctionToolResult]\n) -> ToolsToFinalOutputResult:\n weather: Weather = results[0].output\n return ToolsToFinalOutputResult(\n is_final_output=True, final_output=f\"{weather.city} is {weather.conditions}.\"\n )", - "detail": "examples.agent_patterns.forcing_tool_use", - "documentation": {} - }, - { - "label": "MathHomeworkOutput", - "kind": 6, - "importPath": "examples.agent_patterns.input_guardrails", - "description": "examples.agent_patterns.input_guardrails", - "peekOfCode": "class MathHomeworkOutput(BaseModel):\n reasoning: str\n is_math_homework: bool\nguardrail_agent = Agent(\n name=\"Guardrail check\",\n instructions=\"Check if the user is asking you to do their math homework.\",\n output_type=MathHomeworkOutput,\n)\n@input_guardrail\nasync def math_guardrail(", - "detail": "examples.agent_patterns.input_guardrails", - "documentation": {} - }, - { - "label": "guardrail_agent", - "kind": 5, - "importPath": "examples.agent_patterns.input_guardrails", - "description": "examples.agent_patterns.input_guardrails", - "peekOfCode": "guardrail_agent = Agent(\n name=\"Guardrail check\",\n instructions=\"Check if the user is asking you to do their math homework.\",\n output_type=MathHomeworkOutput,\n)\n@input_guardrail\nasync def math_guardrail(\n context: RunContextWrapper[None], agent: Agent, input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n \"\"\"This is an input guardrail function, which happens to call an agent to check if the input", - "detail": "examples.agent_patterns.input_guardrails", - "documentation": {} - }, - { - "label": "EvaluationFeedback", - "kind": 6, - "importPath": "examples.agent_patterns.llm_as_a_judge", - "description": "examples.agent_patterns.llm_as_a_judge", - "peekOfCode": "class EvaluationFeedback:\n feedback: str\n score: Literal[\"pass\", \"needs_improvement\", \"fail\"]\nevaluator = Agent[None](\n name=\"evaluator\",\n instructions=(\n \"You evaluate a story outline and decide if it's good enough.\"\n \"If it's not good enough, you provide feedback on what needs to be improved.\"\n \"Never give it a pass on the first try. 
After 5 attempts, you can give it a pass if story outline is good enough - do not go for perfection\"\n ),", - "detail": "examples.agent_patterns.llm_as_a_judge", - "documentation": {} - }, - { - "label": "story_outline_generator", - "kind": 5, - "importPath": "examples.agent_patterns.llm_as_a_judge", - "description": "examples.agent_patterns.llm_as_a_judge", - "peekOfCode": "story_outline_generator = Agent(\n name=\"story_outline_generator\",\n instructions=(\n \"You generate a very short story outline based on the user's input.\"\n \"If there is any feedback provided, use it to improve the outline.\"\n ),\n)\n@dataclass\nclass EvaluationFeedback:\n feedback: str", - "detail": "examples.agent_patterns.llm_as_a_judge", - "documentation": {} - }, - { - "label": "evaluator", - "kind": 5, - "importPath": "examples.agent_patterns.llm_as_a_judge", - "description": "examples.agent_patterns.llm_as_a_judge", - "peekOfCode": "evaluator = Agent[None](\n name=\"evaluator\",\n instructions=(\n \"You evaluate a story outline and decide if it's good enough.\"\n \"If it's not good enough, you provide feedback on what needs to be improved.\"\n \"Never give it a pass on the first try. After 5 attempts, you can give it a pass if story outline is good enough - do not go for perfection\"\n ),\n output_type=EvaluationFeedback,\n)\nasync def main() -> None:", - "detail": "examples.agent_patterns.llm_as_a_judge", - "documentation": {} - }, - { - "label": "MessageOutput", - "kind": 6, - "importPath": "examples.agent_patterns.output_guardrails", - "description": "examples.agent_patterns.output_guardrails", - "peekOfCode": "class MessageOutput(BaseModel):\n reasoning: str = Field(description=\"Thoughts on how to respond to the user's message\")\n response: str = Field(description=\"The response to the user's message\")\n user_name: str | None = Field(description=\"The name of the user who sent the message, if known\")\n@output_guardrail\nasync def sensitive_data_check(\n context: RunContextWrapper, agent: Agent, output: MessageOutput\n) -> GuardrailFunctionOutput:\n phone_number_in_response = \"650\" in output.response\n phone_number_in_reasoning = \"650\" in output.reasoning", - "detail": "examples.agent_patterns.output_guardrails", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.agent_patterns.output_guardrails", - "description": "examples.agent_patterns.output_guardrails", - "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",\n output_type=MessageOutput,\n output_guardrails=[sensitive_data_check],\n)\nasync def main():\n # This should be ok\n await Runner.run(agent, \"What's the capital of California?\")\n print(\"First message passed\")", - "detail": "examples.agent_patterns.output_guardrails", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.agent_patterns.parallelization", - "description": "examples.agent_patterns.parallelization", - "peekOfCode": "spanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You translate the user's message to Spanish\",\n)\ntranslation_picker = Agent(\n name=\"translation_picker\",\n instructions=\"You pick the best Spanish translation from the given options.\",\n)\nasync def main():\n msg = input(\"Hi! 
Enter a message, and we'll translate it to Spanish.\\n\\n\")", - "detail": "examples.agent_patterns.parallelization", - "documentation": {} - }, - { - "label": "translation_picker", - "kind": 5, - "importPath": "examples.agent_patterns.parallelization", - "description": "examples.agent_patterns.parallelization", - "peekOfCode": "translation_picker = Agent(\n name=\"translation_picker\",\n instructions=\"You pick the best Spanish translation from the given options.\",\n)\nasync def main():\n msg = input(\"Hi! Enter a message, and we'll translate it to Spanish.\\n\\n\")\n # Ensure the entire workflow is a single trace\n with trace(\"Parallel translation\"):\n res_1, res_2, res_3 = await asyncio.gather(\n Runner.run(", - "detail": "examples.agent_patterns.parallelization", - "documentation": {} - }, - { - "label": "french_agent", - "kind": 5, - "importPath": "examples.agent_patterns.routing", - "description": "examples.agent_patterns.routing", - "peekOfCode": "french_agent = Agent(\n name=\"french_agent\",\n instructions=\"You only speak French\",\n)\nspanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You only speak Spanish\",\n)\nenglish_agent = Agent(\n name=\"english_agent\",", - "detail": "examples.agent_patterns.routing", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.agent_patterns.routing", - "description": "examples.agent_patterns.routing", - "peekOfCode": "spanish_agent = Agent(\n name=\"spanish_agent\",\n instructions=\"You only speak Spanish\",\n)\nenglish_agent = Agent(\n name=\"english_agent\",\n instructions=\"You only speak English\",\n)\ntriage_agent = Agent(\n name=\"triage_agent\",", - "detail": "examples.agent_patterns.routing", - "documentation": {} - }, - { - "label": "english_agent", - "kind": 5, - "importPath": "examples.agent_patterns.routing", - "description": "examples.agent_patterns.routing", - "peekOfCode": "english_agent = Agent(\n name=\"english_agent\",\n instructions=\"You only speak English\",\n)\ntriage_agent = Agent(\n name=\"triage_agent\",\n instructions=\"Handoff to the appropriate agent based on the language of the request.\",\n handoffs=[french_agent, spanish_agent, english_agent],\n)\nasync def main():", - "detail": "examples.agent_patterns.routing", - "documentation": {} - }, - { - "label": "triage_agent", - "kind": 5, - "importPath": "examples.agent_patterns.routing", - "description": "examples.agent_patterns.routing", - "peekOfCode": "triage_agent = Agent(\n name=\"triage_agent\",\n instructions=\"Handoff to the appropriate agent based on the language of the request.\",\n handoffs=[french_agent, spanish_agent, english_agent],\n)\nasync def main():\n # We'll create an ID for this conversation, so we can link each trace\n conversation_id = str(uuid.uuid4().hex[:16])\n msg = input(\"Hi! We speak French, Spanish and English. How can I help? 
\")\n agent = triage_agent", - "detail": "examples.agent_patterns.routing", - "documentation": {} - }, - { - "label": "GuardrailOutput", - "kind": 6, - "importPath": "examples.agent_patterns.streaming_guardrails", - "description": "examples.agent_patterns.streaming_guardrails", - "peekOfCode": "class GuardrailOutput(BaseModel):\n reasoning: str = Field(\n description=\"Reasoning about whether the response could be understood by a ten year old.\"\n )\n is_readable_by_ten_year_old: bool = Field(\n description=\"Whether the response is understandable by a ten year old.\"\n )\nguardrail_agent = Agent(\n name=\"Checker\",\n instructions=(", - "detail": "examples.agent_patterns.streaming_guardrails", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.agent_patterns.streaming_guardrails", - "description": "examples.agent_patterns.streaming_guardrails", - "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"You are a helpful assistant. You ALWAYS write long responses, making sure to be verbose \"\n \"and detailed.\"\n ),\n)\nclass GuardrailOutput(BaseModel):\n reasoning: str = Field(\n description=\"Reasoning about whether the response could be understood by a ten year old.\"", - "detail": "examples.agent_patterns.streaming_guardrails", - "documentation": {} - }, - { - "label": "guardrail_agent", - "kind": 5, - "importPath": "examples.agent_patterns.streaming_guardrails", - "description": "examples.agent_patterns.streaming_guardrails", - "peekOfCode": "guardrail_agent = Agent(\n name=\"Checker\",\n instructions=(\n \"You will be given a question and a response. Your goal is to judge whether the response \"\n \"is simple enough to be understood by a ten year old.\"\n ),\n output_type=GuardrailOutput,\n model=\"gpt-4o-mini\",\n)\nasync def check_guardrail(text: str) -> GuardrailOutput:", - "detail": "examples.agent_patterns.streaming_guardrails", - "documentation": {} - }, - { - "label": "CustomAgentHooks", - "kind": 6, - "importPath": "examples.basic.agent_lifecycle_example", - "description": "examples.basic.agent_lifecycle_example", - "peekOfCode": "class CustomAgentHooks(AgentHooks):\n def __init__(self, display_name: str):\n self.event_counter = 0\n self.display_name = display_name\n async def on_start(self, context: RunContextWrapper, agent: Agent) -> None:\n self.event_counter += 1\n print(f\"### ({self.display_name}) {self.event_counter}: Agent {agent.name} started\")\n async def on_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:\n self.event_counter += 1\n print(", - "detail": "examples.basic.agent_lifecycle_example", - "documentation": {} - }, - { - "label": "FinalResult", - "kind": 6, - "importPath": "examples.basic.agent_lifecycle_example", - "description": "examples.basic.agent_lifecycle_example", - "peekOfCode": "class FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n hooks=CustomAgentHooks(display_name=\"Multiply Agent\"),\n)\nstart_agent = Agent(", - "detail": "examples.basic.agent_lifecycle_example", - "documentation": {} - }, - { - "label": "random_number", - "kind": 2, - "importPath": "examples.basic.agent_lifecycle_example", - "description": "examples.basic.agent_lifecycle_example", - "peekOfCode": "def random_number(max: int) -> int:\n \"\"\"\n Generate a random number up to the provided maximum.\n \"\"\"\n return 
random.randint(0, max)\n@function_tool\ndef multiply_by_two(x: int) -> int:\n \"\"\"Simple multiplication by two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):", - "detail": "examples.basic.agent_lifecycle_example", - "documentation": {} - }, - { - "label": "multiply_by_two", - "kind": 2, - "importPath": "examples.basic.agent_lifecycle_example", - "description": "examples.basic.agent_lifecycle_example", - "peekOfCode": "def multiply_by_two(x: int) -> int:\n \"\"\"Simple multiplication by two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,", - "detail": "examples.basic.agent_lifecycle_example", - "documentation": {} - }, - { - "label": "multiply_agent", - "kind": 5, - "importPath": "examples.basic.agent_lifecycle_example", - "description": "examples.basic.agent_lifecycle_example", - "peekOfCode": "multiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n hooks=CustomAgentHooks(display_name=\"Multiply Agent\"),\n)\nstart_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. If it's odd, hand off to the multiply agent.\",", - "detail": "examples.basic.agent_lifecycle_example", - "documentation": {} - }, - { - "label": "start_agent", - "kind": 5, - "importPath": "examples.basic.agent_lifecycle_example", - "description": "examples.basic.agent_lifecycle_example", - "peekOfCode": "start_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. 
If it's odd, hand off to the multiply agent.\",\n tools=[random_number],\n output_type=FinalResult,\n handoffs=[multiply_agent],\n hooks=CustomAgentHooks(display_name=\"Start Agent\"),\n)\nasync def main() -> None:\n user_input = input(\"Enter a max number: \")", - "detail": "examples.basic.agent_lifecycle_example", - "documentation": {} - }, - { - "label": "CustomContext", - "kind": 6, - "importPath": "examples.basic.dynamic_system_prompt", - "description": "examples.basic.dynamic_system_prompt", - "peekOfCode": "class CustomContext:\n def __init__(self, style: Literal[\"haiku\", \"pirate\", \"robot\"]):\n self.style = style\ndef custom_instructions(\n run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext]\n) -> str:\n context = run_context.context\n if context.style == \"haiku\":\n return \"Only respond in haikus.\"\n elif context.style == \"pirate\":", - "detail": "examples.basic.dynamic_system_prompt", - "documentation": {} - }, - { - "label": "custom_instructions", - "kind": 2, - "importPath": "examples.basic.dynamic_system_prompt", - "description": "examples.basic.dynamic_system_prompt", - "peekOfCode": "def custom_instructions(\n run_context: RunContextWrapper[CustomContext], agent: Agent[CustomContext]\n) -> str:\n context = run_context.context\n if context.style == \"haiku\":\n return \"Only respond in haikus.\"\n elif context.style == \"pirate\":\n return \"Respond as a pirate.\"\n else:\n return \"Respond as a robot and say 'beep boop' a lot.\"", - "detail": "examples.basic.dynamic_system_prompt", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.basic.dynamic_system_prompt", - "description": "examples.basic.dynamic_system_prompt", - "peekOfCode": "agent = Agent(\n name=\"Chat agent\",\n instructions=custom_instructions,\n)\nasync def main():\n choice: Literal[\"haiku\", \"pirate\", \"robot\"] = random.choice([\"haiku\", \"pirate\", \"robot\"])\n context = CustomContext(style=choice)\n print(f\"Using style: {choice}\\n\")\n user_message = \"Tell me a joke.\"\n print(f\"User: {user_message}\")", - "detail": "examples.basic.dynamic_system_prompt", - "documentation": {} - }, - { - "label": "ExampleHooks", - "kind": 6, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "class ExampleHooks(RunHooks):\n def __init__(self):\n self.event_counter = 0\n def _usage_to_str(self, usage: Usage) -> str:\n return f\"{usage.requests} requests, {usage.input_tokens} input tokens, {usage.output_tokens} output tokens, {usage.total_tokens} total tokens\"\n async def on_agent_start(self, context: RunContextWrapper, agent: Agent) -> None:\n self.event_counter += 1\n print(\n f\"### {self.event_counter}: Agent {agent.name} started. 
Usage: {self._usage_to_str(context.usage)}\"\n )", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "FinalResult", - "kind": 6, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "class FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n)\nstart_agent = Agent(\n name=\"Start Agent\",", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "random_number", - "kind": 2, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "def random_number(max: int) -> int:\n \"\"\"Generate a random number up to the provided max.\"\"\"\n return random.randint(0, max)\n@function_tool\ndef multiply_by_two(x: int) -> int:\n \"\"\"Return x times two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "multiply_by_two", - "kind": 2, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "def multiply_by_two(x: int) -> int:\n \"\"\"Return x times two.\"\"\"\n return x * 2\nclass FinalResult(BaseModel):\n number: int\nmultiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "hooks", - "kind": 5, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "hooks = ExampleHooks()\n###\n@function_tool\ndef random_number(max: int) -> int:\n \"\"\"Generate a random number up to the provided max.\"\"\"\n return random.randint(0, max)\n@function_tool\ndef multiply_by_two(x: int) -> int:\n \"\"\"Return x times two.\"\"\"\n return x * 2", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "multiply_agent", - "kind": 5, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "multiply_agent = Agent(\n name=\"Multiply Agent\",\n instructions=\"Multiply the number by 2 and then return the final result.\",\n tools=[multiply_by_two],\n output_type=FinalResult,\n)\nstart_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. If it's odd, hand off to the multiplier agent.\",\n tools=[random_number],", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "start_agent", - "kind": 5, - "importPath": "examples.basic.lifecycle_example", - "description": "examples.basic.lifecycle_example", - "peekOfCode": "start_agent = Agent(\n name=\"Start Agent\",\n instructions=\"Generate a random number. If it's even, stop. 
If it's odd, hand off to the multiplier agent.\",\n tools=[random_number],\n output_type=FinalResult,\n handoffs=[multiply_agent],\n)\nasync def main() -> None:\n user_input = input(\"Enter a max number: \")\n await Runner.run(", - "detail": "examples.basic.lifecycle_example", - "documentation": {} - }, - { - "label": "image_to_base64", - "kind": 2, - "importPath": "examples.basic.local_image", - "description": "examples.basic.local_image", - "peekOfCode": "def image_to_base64(image_path):\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n return encoded_string\nasync def main():\n # Print base64-encoded image\n b64_image = image_to_base64(FILEPATH)\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",", - "detail": "examples.basic.local_image", - "documentation": {} - }, - { - "label": "FILEPATH", - "kind": 5, - "importPath": "examples.basic.local_image", - "description": "examples.basic.local_image", - "peekOfCode": "FILEPATH = os.path.join(os.path.dirname(__file__), \"media/image_bison.jpg\")\ndef image_to_base64(image_path):\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n return encoded_string\nasync def main():\n # Print base64-encoded image\n b64_image = image_to_base64(FILEPATH)\n agent = Agent(\n name=\"Assistant\",", - "detail": "examples.basic.local_image", - "documentation": {} - }, - { - "label": "OutputType", - "kind": 6, - "importPath": "examples.basic.non_strict_output_type", - "description": "examples.basic.non_strict_output_type", - "peekOfCode": "class OutputType:\n jokes: dict[int, str]\n \"\"\"A list of jokes, indexed by joke number.\"\"\"\nclass CustomOutputSchema(AgentOutputSchemaBase):\n \"\"\"A demonstration of a custom output schema.\"\"\"\n def is_plain_text(self) -> bool:\n return False\n def name(self) -> str:\n return \"CustomOutputSchema\"\n def json_schema(self) -> dict[str, Any]:", - "detail": "examples.basic.non_strict_output_type", - "documentation": {} - }, - { - "label": "CustomOutputSchema", - "kind": 6, - "importPath": "examples.basic.non_strict_output_type", - "description": "examples.basic.non_strict_output_type", - "peekOfCode": "class CustomOutputSchema(AgentOutputSchemaBase):\n \"\"\"A demonstration of a custom output schema.\"\"\"\n def is_plain_text(self) -> bool:\n return False\n def name(self) -> str:\n return \"CustomOutputSchema\"\n def json_schema(self) -> dict[str, Any]:\n return {\n \"type\": \"object\",\n \"properties\": {\"jokes\": {\"type\": \"object\", \"properties\": {\"joke\": {\"type\": \"string\"}}}},", - "detail": "examples.basic.non_strict_output_type", - "documentation": {} - }, - { - "label": "DynamicContext", - "kind": 6, - "importPath": "examples.basic.prompt_template", - "description": "examples.basic.prompt_template", - "peekOfCode": "class DynamicContext:\n def __init__(self, prompt_id: str):\n self.prompt_id = prompt_id\n self.poem_style = random.choice([\"limerick\", \"haiku\", \"ballad\"])\n print(f\"[debug] DynamicContext initialized with poem_style: {self.poem_style}\")\nasync def _get_dynamic_prompt(data: GenerateDynamicPromptData):\n ctx: DynamicContext = data.context.context\n return {\n \"id\": ctx.prompt_id,\n \"version\": \"1\",", - "detail": "examples.basic.prompt_template", - "documentation": {} - }, - { - "label": "DEFAULT_PROMPT_ID", - "kind": 5, - "importPath": "examples.basic.prompt_template", - "description": 
"examples.basic.prompt_template", - "peekOfCode": "DEFAULT_PROMPT_ID = \"pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b\"\nclass DynamicContext:\n def __init__(self, prompt_id: str):\n self.prompt_id = prompt_id\n self.poem_style = random.choice([\"limerick\", \"haiku\", \"ballad\"])\n print(f\"[debug] DynamicContext initialized with poem_style: {self.poem_style}\")\nasync def _get_dynamic_prompt(data: GenerateDynamicPromptData):\n ctx: DynamicContext = data.context.context\n return {\n \"id\": ctx.prompt_id,", - "detail": "examples.basic.prompt_template", - "documentation": {} - }, - { - "label": "URL", - "kind": 5, - "importPath": "examples.basic.remote_image", - "description": "examples.basic.remote_image", - "peekOfCode": "URL = \"https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg\"\nasync def main():\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",\n )\n result = await Runner.run(\n agent,\n [\n {", - "detail": "examples.basic.remote_image", - "documentation": {} - }, - { - "label": "how_many_jokes", - "kind": 2, - "importPath": "examples.basic.stream_items", - "description": "examples.basic.stream_items", - "peekOfCode": "def how_many_jokes() -> int:\n return random.randint(1, 10)\nasync def main():\n agent = Agent(\n name=\"Joker\",\n instructions=\"First call the `how_many_jokes` tool, then tell that many jokes.\",\n tools=[how_many_jokes],\n )\n result = Runner.run_streamed(\n agent,", - "detail": "examples.basic.stream_items", - "documentation": {} - }, - { - "label": "Weather", - "kind": 6, - "importPath": "examples.basic.tools", - "description": "examples.basic.tools", - "peekOfCode": "class Weather(BaseModel):\n city: str\n temperature_range: str\n conditions: str\n@function_tool\ndef get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind.\")\nagent = Agent(\n name=\"Hello world\",", - "detail": "examples.basic.tools", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.basic.tools", - "description": "examples.basic.tools", - "peekOfCode": "def get_weather(city: str) -> Weather:\n print(\"[debug] get_weather called\")\n return Weather(city=city, temperature_range=\"14-20C\", conditions=\"Sunny with wind.\")\nagent = Agent(\n name=\"Hello world\",\n instructions=\"You are a helpful agent.\",\n tools=[get_weather],\n)\nasync def main():\n result = await Runner.run(agent, input=\"What's the weather in Tokyo?\")", - "detail": "examples.basic.tools", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.basic.tools", - "description": "examples.basic.tools", - "peekOfCode": "agent = Agent(\n name=\"Hello world\",\n instructions=\"You are a helpful agent.\",\n tools=[get_weather],\n)\nasync def main():\n result = await Runner.run(agent, input=\"What's the weather in Tokyo?\")\n print(result.final_output)\n # The weather in Tokyo is sunny.\nif __name__ == \"__main__\":", - "detail": "examples.basic.tools", - "documentation": {} - }, - { - "label": "AirlineAgentContext", - "kind": 6, - "importPath": "examples.customer_service.main", - "description": "examples.customer_service.main", - "peekOfCode": "class AirlineAgentContext(BaseModel):\n passenger_name: str | None = None\n confirmation_number: str | None = None\n seat_number: str | None = None\n flight_number: str | None = None\n### TOOLS\n@function_tool(\n 
name_override=\"faq_lookup_tool\", description_override=\"Lookup frequently asked questions.\"\n)\nasync def faq_lookup_tool(question: str) -> str:", - "detail": "examples.customer_service.main", - "documentation": {} - }, - { - "label": "faq_agent", - "kind": 5, - "importPath": "examples.customer_service.main", - "description": "examples.customer_service.main", - "peekOfCode": "faq_agent = Agent[AirlineAgentContext](\n name=\"FAQ Agent\",\n handoff_description=\"A helpful agent that can answer questions about the airline.\",\n instructions=f\"\"\"{RECOMMENDED_PROMPT_PREFIX}\n You are an FAQ agent. If you are speaking to a customer, you probably were transferred to from the triage agent.\n Use the following routine to support the customer.\n # Routine\n 1. Identify the last question asked by the customer.\n 2. Use the faq lookup tool to answer the question. Do not rely on your own knowledge.\n 3. If you cannot answer the question, transfer back to the triage agent.\"\"\",", - "detail": "examples.customer_service.main", - "documentation": {} - }, - { - "label": "seat_booking_agent", - "kind": 5, - "importPath": "examples.customer_service.main", - "description": "examples.customer_service.main", - "peekOfCode": "seat_booking_agent = Agent[AirlineAgentContext](\n name=\"Seat Booking Agent\",\n handoff_description=\"A helpful agent that can update a seat on a flight.\",\n instructions=f\"\"\"{RECOMMENDED_PROMPT_PREFIX}\n You are a seat booking agent. If you are speaking to a customer, you probably were transferred to from the triage agent.\n Use the following routine to support the customer.\n # Routine\n 1. Ask for their confirmation number.\n 2. Ask the customer what their desired seat number is.\n 3. Use the update seat tool to update the seat on the flight.", - "detail": "examples.customer_service.main", - "documentation": {} - }, - { - "label": "triage_agent", - "kind": 5, - "importPath": "examples.customer_service.main", - "description": "examples.customer_service.main", - "peekOfCode": "triage_agent = Agent[AirlineAgentContext](\n name=\"Triage Agent\",\n handoff_description=\"A triage agent that can delegate a customer's request to the appropriate agent.\",\n instructions=(\n f\"{RECOMMENDED_PROMPT_PREFIX} \"\n \"You are a helpful triaging agent. You can use your tools to delegate questions to other appropriate agents.\"\n ),\n handoffs=[\n faq_agent,\n handoff(agent=seat_booking_agent, on_handoff=on_seat_booking_handoff),", - "detail": "examples.customer_service.main", - "documentation": {} - }, - { - "label": "AnalysisSummary", - "kind": 6, - "importPath": "examples.financial_research_agent.agents.financials_agent", - "description": "examples.financial_research_agent.agents.financials_agent", - "peekOfCode": "class AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nfinancials_agent = Agent(\n name=\"FundamentalsAnalystAgent\",\n instructions=FINANCIALS_PROMPT,\n output_type=AnalysisSummary,\n)", - "detail": "examples.financial_research_agent.agents.financials_agent", - "documentation": {} - }, - { - "label": "FINANCIALS_PROMPT", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.financials_agent", - "description": "examples.financial_research_agent.agents.financials_agent", - "peekOfCode": "FINANCIALS_PROMPT = (\n \"You are a financial analyst focused on company fundamentals such as revenue, \"\n \"profit, margins and growth trajectory. 
Given a collection of web (and optional file) \"\n \"search results about a company, write a concise analysis of its recent financial \"\n \"performance. Pull out key metrics or quotes. Keep it under 2 paragraphs.\"\n)\nclass AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nfinancials_agent = Agent(", - "detail": "examples.financial_research_agent.agents.financials_agent", - "documentation": {} - }, - { - "label": "financials_agent", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.financials_agent", - "description": "examples.financial_research_agent.agents.financials_agent", - "peekOfCode": "financials_agent = Agent(\n name=\"FundamentalsAnalystAgent\",\n instructions=FINANCIALS_PROMPT,\n output_type=AnalysisSummary,\n)", - "detail": "examples.financial_research_agent.agents.financials_agent", - "documentation": {} - }, - { - "label": "FinancialSearchItem", - "kind": 6, - "importPath": "examples.financial_research_agent.agents.planner_agent", - "description": "examples.financial_research_agent.agents.planner_agent", - "peekOfCode": "class FinancialSearchItem(BaseModel):\n reason: str\n \"\"\"Your reasoning for why this search is relevant.\"\"\"\n query: str\n \"\"\"The search term to feed into a web (or file) search.\"\"\"\nclass FinancialSearchPlan(BaseModel):\n searches: list[FinancialSearchItem]\n \"\"\"A list of searches to perform.\"\"\"\nplanner_agent = Agent(\n name=\"FinancialPlannerAgent\",", - "detail": "examples.financial_research_agent.agents.planner_agent", - "documentation": {} - }, - { - "label": "FinancialSearchPlan", - "kind": 6, - "importPath": "examples.financial_research_agent.agents.planner_agent", - "description": "examples.financial_research_agent.agents.planner_agent", - "peekOfCode": "class FinancialSearchPlan(BaseModel):\n searches: list[FinancialSearchItem]\n \"\"\"A list of searches to perform.\"\"\"\nplanner_agent = Agent(\n name=\"FinancialPlannerAgent\",\n instructions=PROMPT,\n model=\"o3-mini\",\n output_type=FinancialSearchPlan,\n)", - "detail": "examples.financial_research_agent.agents.planner_agent", - "documentation": {} - }, - { - "label": "PROMPT", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.planner_agent", - "description": "examples.financial_research_agent.agents.planner_agent", - "peekOfCode": "PROMPT = (\n \"You are a financial research planner. Given a request for financial analysis, \"\n \"produce a set of web searches to gather the context needed. Aim for recent \"\n \"headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. 
\"\n \"Output between 5 and 15 search terms to query for.\"\n)\nclass FinancialSearchItem(BaseModel):\n reason: str\n \"\"\"Your reasoning for why this search is relevant.\"\"\"\n query: str", - "detail": "examples.financial_research_agent.agents.planner_agent", - "documentation": {} - }, - { - "label": "planner_agent", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.planner_agent", - "description": "examples.financial_research_agent.agents.planner_agent", - "peekOfCode": "planner_agent = Agent(\n name=\"FinancialPlannerAgent\",\n instructions=PROMPT,\n model=\"o3-mini\",\n output_type=FinancialSearchPlan,\n)", - "detail": "examples.financial_research_agent.agents.planner_agent", - "documentation": {} - }, - { - "label": "AnalysisSummary", - "kind": 6, - "importPath": "examples.financial_research_agent.agents.risk_agent", - "description": "examples.financial_research_agent.agents.risk_agent", - "peekOfCode": "class AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nrisk_agent = Agent(\n name=\"RiskAnalystAgent\",\n instructions=RISK_PROMPT,\n output_type=AnalysisSummary,\n)", - "detail": "examples.financial_research_agent.agents.risk_agent", - "documentation": {} - }, - { - "label": "RISK_PROMPT", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.risk_agent", - "description": "examples.financial_research_agent.agents.risk_agent", - "peekOfCode": "RISK_PROMPT = (\n \"You are a risk analyst looking for potential red flags in a company's outlook. \"\n \"Given background research, produce a short analysis of risks such as competitive threats, \"\n \"regulatory issues, supply chain problems, or slowing growth. Keep it under 2 paragraphs.\"\n)\nclass AnalysisSummary(BaseModel):\n summary: str\n \"\"\"Short text summary for this aspect of the analysis.\"\"\"\nrisk_agent = Agent(\n name=\"RiskAnalystAgent\",", - "detail": "examples.financial_research_agent.agents.risk_agent", - "documentation": {} - }, - { - "label": "risk_agent", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.risk_agent", - "description": "examples.financial_research_agent.agents.risk_agent", - "peekOfCode": "risk_agent = Agent(\n name=\"RiskAnalystAgent\",\n instructions=RISK_PROMPT,\n output_type=AnalysisSummary,\n)", - "detail": "examples.financial_research_agent.agents.risk_agent", - "documentation": {} - }, - { - "label": "INSTRUCTIONS", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.search_agent", - "description": "examples.financial_research_agent.agents.search_agent", - "peekOfCode": "INSTRUCTIONS = (\n \"You are a research assistant specializing in financial topics. \"\n \"Given a search term, use web search to retrieve up‑to‑date context and \"\n \"produce a short summary of at most 300 words. 
Focus on key numbers, events, \"\n \"or quotes that will be useful to a financial analyst.\"\n)\nsearch_agent = Agent(\n name=\"FinancialSearchAgent\",\n instructions=INSTRUCTIONS,\n tools=[WebSearchTool()],", - "detail": "examples.financial_research_agent.agents.search_agent", - "documentation": {} - }, - { - "label": "search_agent", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.search_agent", - "description": "examples.financial_research_agent.agents.search_agent", - "peekOfCode": "search_agent = Agent(\n name=\"FinancialSearchAgent\",\n instructions=INSTRUCTIONS,\n tools=[WebSearchTool()],\n model_settings=ModelSettings(tool_choice=\"required\"),\n)", - "detail": "examples.financial_research_agent.agents.search_agent", - "documentation": {} - }, - { - "label": "VerificationResult", - "kind": 6, - "importPath": "examples.financial_research_agent.agents.verifier_agent", - "description": "examples.financial_research_agent.agents.verifier_agent", - "peekOfCode": "class VerificationResult(BaseModel):\n verified: bool\n \"\"\"Whether the report seems coherent and plausible.\"\"\"\n issues: str\n \"\"\"If not verified, describe the main issues or concerns.\"\"\"\nverifier_agent = Agent(\n name=\"VerificationAgent\",\n instructions=VERIFIER_PROMPT,\n model=\"gpt-4o\",\n output_type=VerificationResult,", - "detail": "examples.financial_research_agent.agents.verifier_agent", - "documentation": {} - }, - { - "label": "VERIFIER_PROMPT", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.verifier_agent", - "description": "examples.financial_research_agent.agents.verifier_agent", - "peekOfCode": "VERIFIER_PROMPT = (\n \"You are a meticulous auditor. You have been handed a financial analysis report. \"\n \"Your job is to verify the report is internally consistent, clearly sourced, and makes \"\n \"no unsupported claims. 
Point out any issues or uncertainties.\"\n)\nclass VerificationResult(BaseModel):\n verified: bool\n \"\"\"Whether the report seems coherent and plausible.\"\"\"\n issues: str\n \"\"\"If not verified, describe the main issues or concerns.\"\"\"", - "detail": "examples.financial_research_agent.agents.verifier_agent", - "documentation": {} - }, - { - "label": "verifier_agent", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.verifier_agent", - "description": "examples.financial_research_agent.agents.verifier_agent", - "peekOfCode": "verifier_agent = Agent(\n name=\"VerificationAgent\",\n instructions=VERIFIER_PROMPT,\n model=\"gpt-4o\",\n output_type=VerificationResult,\n)", - "detail": "examples.financial_research_agent.agents.verifier_agent", - "documentation": {} - }, - { - "label": "FinancialReportData", - "kind": 6, - "importPath": "examples.financial_research_agent.agents.writer_agent", - "description": "examples.financial_research_agent.agents.writer_agent", - "peekOfCode": "class FinancialReportData(BaseModel):\n short_summary: str\n \"\"\"A short 2‑3 sentence executive summary.\"\"\"\n markdown_report: str\n \"\"\"The full markdown report.\"\"\"\n follow_up_questions: list[str]\n \"\"\"Suggested follow‑up questions for further research.\"\"\"\n# Note: We will attach handoffs to specialist analyst agents at runtime in the manager.\n# This shows how an agent can use handoffs to delegate to specialized subagents.\nwriter_agent = Agent(", - "detail": "examples.financial_research_agent.agents.writer_agent", - "documentation": {} - }, - { - "label": "WRITER_PROMPT", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.writer_agent", - "description": "examples.financial_research_agent.agents.writer_agent", - "peekOfCode": "WRITER_PROMPT = (\n \"You are a senior financial analyst. You will be provided with the original query and \"\n \"a set of raw search summaries. Your task is to synthesize these into a long‑form markdown \"\n \"report (at least several paragraphs) including a short executive summary and follow‑up \"\n \"questions. If needed, you can call the available analysis tools (e.g. 
fundamentals_analysis, \"\n \"risk_analysis) to get short specialist write‑ups to incorporate.\"\n)\nclass FinancialReportData(BaseModel):\n short_summary: str\n \"\"\"A short 2‑3 sentence executive summary.\"\"\"", - "detail": "examples.financial_research_agent.agents.writer_agent", - "documentation": {} - }, - { - "label": "writer_agent", - "kind": 5, - "importPath": "examples.financial_research_agent.agents.writer_agent", - "description": "examples.financial_research_agent.agents.writer_agent", - "peekOfCode": "writer_agent = Agent(\n name=\"FinancialWriterAgent\",\n instructions=WRITER_PROMPT,\n model=\"gpt-4.5-preview-2025-02-27\",\n output_type=FinancialReportData,\n)", - "detail": "examples.financial_research_agent.agents.writer_agent", - "documentation": {} - }, - { - "label": "FinancialResearchManager", - "kind": 6, - "importPath": "examples.financial_research_agent.manager", - "description": "examples.financial_research_agent.manager", - "peekOfCode": "class FinancialResearchManager:\n \"\"\"\n Orchestrates the full flow: planning, searching, sub‑analysis, writing, and verification.\n \"\"\"\n def __init__(self) -> None:\n self.console = Console()\n self.printer = Printer(self.console)\n async def run(self, query: str) -> None:\n trace_id = gen_trace_id()\n with trace(\"Financial research trace\", trace_id=trace_id):", - "detail": "examples.financial_research_agent.manager", - "documentation": {} - }, - { - "label": "Printer", - "kind": 6, - "importPath": "examples.financial_research_agent.printer", - "description": "examples.financial_research_agent.printer", - "peekOfCode": "class Printer:\n \"\"\"\n Simple wrapper to stream status updates. Used by the financial bot\n manager as it orchestrates planning, search and writing.\n \"\"\"\n def __init__(self, console: Console) -> None:\n self.live = Live(console=console)\n self.items: dict[str, tuple[str, bool]] = {}\n self.hide_done_ids: set[str] = set()\n self.live.start()", - "detail": "examples.financial_research_agent.printer", - "documentation": {} - }, - { - "label": "random_number_tool", - "kind": 2, - "importPath": "examples.handoffs.message_filter", - "description": "examples.handoffs.message_filter", - "peekOfCode": "def random_number_tool(max: int) -> int:\n \"\"\"Return a random integer between 0 and the given maximum.\"\"\"\n return random.randint(0, max)\ndef spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, tuple)", - "detail": "examples.handoffs.message_filter", - "documentation": {} - }, - { - "label": "spanish_handoff_message_filter", - "kind": 2, - "importPath": "examples.handoffs.message_filter", - "description": "examples.handoffs.message_filter", - "peekOfCode": "def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, 
tuple)\n else handoff_message_data.input_history\n )\n return HandoffInputData(", - "detail": "examples.handoffs.message_filter", - "documentation": {} - }, - { - "label": "first_agent", - "kind": 5, - "importPath": "examples.handoffs.message_filter", - "description": "examples.handoffs.message_filter", - "peekOfCode": "first_agent = Agent(\n name=\"Assistant\",\n instructions=\"Be extremely concise.\",\n tools=[random_number_tool],\n)\nspanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)", - "detail": "examples.handoffs.message_filter", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.handoffs.message_filter", - "description": "examples.handoffs.message_filter", - "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)\nsecond_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),", - "detail": "examples.handoffs.message_filter", - "documentation": {} - }, - { - "label": "second_agent", - "kind": 5, - "importPath": "examples.handoffs.message_filter", - "description": "examples.handoffs.message_filter", - "peekOfCode": "second_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),\n handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],\n)\nasync def main():\n # Trace the entire run as a single workflow\n with trace(workflow_name=\"Message filtering\"):", - "detail": "examples.handoffs.message_filter", - "documentation": {} - }, - { - "label": "random_number_tool", - "kind": 2, - "importPath": "examples.handoffs.message_filter_streaming", - "description": "examples.handoffs.message_filter_streaming", - "peekOfCode": "def random_number_tool(max: int) -> int:\n \"\"\"Return a random integer between 0 and the given maximum.\"\"\"\n return random.randint(0, max)\ndef spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, tuple)", - "detail": "examples.handoffs.message_filter_streaming", - "documentation": {} - }, - { - "label": "spanish_handoff_message_filter", - "kind": 2, - "importPath": "examples.handoffs.message_filter_streaming", - "description": "examples.handoffs.message_filter_streaming", - "peekOfCode": "def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:\n # First, we'll remove any tool-related messages from the message history\n handoff_message_data = handoff_filters.remove_all_tools(handoff_message_data)\n # Second, we'll also remove the first two items from the history, just for demonstration\n history = (\n tuple(handoff_message_data.input_history[2:])\n if isinstance(handoff_message_data.input_history, tuple)\n else handoff_message_data.input_history\n )\n return HandoffInputData(", - "detail": 
"examples.handoffs.message_filter_streaming", - "documentation": {} - }, - { - "label": "first_agent", - "kind": 5, - "importPath": "examples.handoffs.message_filter_streaming", - "description": "examples.handoffs.message_filter_streaming", - "peekOfCode": "first_agent = Agent(\n name=\"Assistant\",\n instructions=\"Be extremely concise.\",\n tools=[random_number_tool],\n)\nspanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)", - "detail": "examples.handoffs.message_filter_streaming", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.handoffs.message_filter_streaming", - "description": "examples.handoffs.message_filter_streaming", - "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish Assistant\",\n instructions=\"You only speak Spanish and are extremely concise.\",\n handoff_description=\"A Spanish-speaking assistant.\",\n)\nsecond_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),", - "detail": "examples.handoffs.message_filter_streaming", - "documentation": {} - }, - { - "label": "second_agent", - "kind": 5, - "importPath": "examples.handoffs.message_filter_streaming", - "description": "examples.handoffs.message_filter_streaming", - "peekOfCode": "second_agent = Agent(\n name=\"Assistant\",\n instructions=(\n \"Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant.\"\n ),\n handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],\n)\nasync def main():\n # Trace the entire run as a single workflow\n with trace(workflow_name=\"Streaming message filter\"):", - "detail": "examples.handoffs.message_filter_streaming", - "documentation": {} - }, - { - "label": "approval_callback", - "kind": 2, - "importPath": "examples.hosted_mcp.approvals", - "description": "examples.hosted_mcp.approvals", - "peekOfCode": "def approval_callback(request: MCPToolApprovalRequest) -> MCPToolApprovalFunctionResult:\n answer = input(f\"Approve running the tool `{request.data.name}`? 
(y/n) \")\n result: MCPToolApprovalFunctionResult = {\"approve\": answer == \"y\"}\n if not result[\"approve\"]:\n result[\"reason\"] = \"User denied\"\n return result\nasync def main(verbose: bool, stream: bool):\n agent = Agent(\n name=\"Assistant\",\n tools=[", - "detail": "examples.hosted_mcp.approvals", - "documentation": {} - }, - { - "label": "process_user_input", - "kind": 2, - "importPath": "examples.mcp.prompt_server.main", - "description": "examples.mcp.prompt_server.main", - "peekOfCode": "def process_user_input(user_input):\n command = f\"echo {user_input}\"\n os.system(command)\n return \"Command executed\"\n\"\"\"\n print(f\"Running: {message[:60]}...\")\n result = await Runner.run(starting_agent=agent, input=message)\n print(result.final_output)\n print(\"\\n\" + \"=\" * 50 + \"\\n\")\nasync def show_available_prompts(mcp_server: MCPServer):", - "detail": "examples.mcp.prompt_server.main", - "documentation": {} - }, - { - "label": "generate_code_review_instructions", - "kind": 2, - "importPath": "examples.mcp.prompt_server.server", - "description": "examples.mcp.prompt_server.server", - "peekOfCode": "def generate_code_review_instructions(\n focus: str = \"general code quality\", language: str = \"python\"\n) -> str:\n \"\"\"Generate agent instructions for code review tasks\"\"\"\n print(f\"[debug-server] generate_code_review_instructions({focus}, {language})\")\n return f\"\"\"You are a senior {language} code review specialist. Your role is to provide comprehensive code analysis with focus on {focus}.\nINSTRUCTIONS:\n- Analyze code for quality, security, performance, and best practices\n- Provide specific, actionable feedback with examples\n- Identify potential bugs, vulnerabilities, and optimization opportunities", - "detail": "examples.mcp.prompt_server.server", - "documentation": {} - }, - { - "label": "mcp", - "kind": 5, - "importPath": "examples.mcp.prompt_server.server", - "description": "examples.mcp.prompt_server.server", - "peekOfCode": "mcp = FastMCP(\"Prompt Server\")\n# Instruction-generating prompts (user-controlled)\n@mcp.prompt()\ndef generate_code_review_instructions(\n focus: str = \"general code quality\", language: str = \"python\"\n) -> str:\n \"\"\"Generate agent instructions for code review tasks\"\"\"\n print(f\"[debug-server] generate_code_review_instructions({focus}, {language})\")\n return f\"\"\"You are a senior {language} code review specialist. 
Your role is to provide comprehensive code analysis with focus on {focus}.\nINSTRUCTIONS:", - "detail": "examples.mcp.prompt_server.server", - "documentation": {} - }, - { - "label": "add", - "kind": 2, - "importPath": "examples.mcp.sse_example.server", - "description": "examples.mcp.sse_example.server", - "peekOfCode": "def add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:", - "detail": "examples.mcp.sse_example.server", - "documentation": {} - }, - { - "label": "get_secret_word", - "kind": 2, - "importPath": "examples.mcp.sse_example.server", - "description": "examples.mcp.sse_example.server", - "peekOfCode": "def get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif __name__ == \"__main__\":", - "detail": "examples.mcp.sse_example.server", - "documentation": {} - }, - { - "label": "get_current_weather", - "kind": 2, - "importPath": "examples.mcp.sse_example.server", - "description": "examples.mcp.sse_example.server", - "peekOfCode": "def get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif __name__ == \"__main__\":\n mcp.run(transport=\"sse\")", - "detail": "examples.mcp.sse_example.server", - "documentation": {} - }, - { - "label": "mcp", - "kind": 5, - "importPath": "examples.mcp.sse_example.server", - "description": "examples.mcp.sse_example.server", - "peekOfCode": "mcp = FastMCP(\"Echo Server\")\n@mcp.tool()\ndef add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])", - "detail": "examples.mcp.sse_example.server", - "documentation": {} - }, - { - "label": "add", - "kind": 2, - "importPath": "examples.mcp.streamablehttp_example.server", - "description": "examples.mcp.streamablehttp_example.server", - "peekOfCode": "def add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:", - "detail": "examples.mcp.streamablehttp_example.server", - "documentation": {} - }, - { - "label": "get_secret_word", - "kind": 2, - "importPath": "examples.mcp.streamablehttp_example.server", - "description": "examples.mcp.streamablehttp_example.server", - "peekOfCode": "def get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])\n@mcp.tool()\ndef get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif 
__name__ == \"__main__\":", - "detail": "examples.mcp.streamablehttp_example.server", - "documentation": {} - }, - { - "label": "get_current_weather", - "kind": 2, - "importPath": "examples.mcp.streamablehttp_example.server", - "description": "examples.mcp.streamablehttp_example.server", - "peekOfCode": "def get_current_weather(city: str) -> str:\n print(f\"[debug-server] get_current_weather({city})\")\n endpoint = \"https://wttr.in\"\n response = requests.get(f\"{endpoint}/{city}\")\n return response.text\nif __name__ == \"__main__\":\n mcp.run(transport=\"streamable-http\")", - "detail": "examples.mcp.streamablehttp_example.server", - "documentation": {} - }, - { - "label": "mcp", - "kind": 5, - "importPath": "examples.mcp.streamablehttp_example.server", - "description": "examples.mcp.streamablehttp_example.server", - "peekOfCode": "mcp = FastMCP(\"Echo Server\")\n@mcp.tool()\ndef add(a: int, b: int) -> int:\n \"\"\"Add two numbers\"\"\"\n print(f\"[debug-server] add({a}, {b})\")\n return a + b\n@mcp.tool()\ndef get_secret_word() -> str:\n print(\"[debug-server] get_secret_word()\")\n return random.choice([\"apple\", \"banana\", \"cherry\"])", - "detail": "examples.mcp.streamablehttp_example.server", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.model_providers.custom_example_agent", - "description": "examples.model_providers.custom_example_agent", - "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n # This agent will use the custom LLM provider\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),\n tools=[get_weather],", - "detail": "examples.model_providers.custom_example_agent", - "documentation": {} - }, - { - "label": "BASE_URL", - "kind": 5, - "importPath": "examples.model_providers.custom_example_agent", - "description": "examples.model_providers.custom_example_agent", - "peekOfCode": "BASE_URL = os.getenv(\"EXAMPLE_BASE_URL\") or \"\"\nAPI_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for a specific agent. Steps:\n1. Create a custom OpenAI client.\n2. Create a `Model` that uses the custom client.", - "detail": "examples.model_providers.custom_example_agent", - "documentation": {} - }, - { - "label": "API_KEY", - "kind": 5, - "importPath": "examples.model_providers.custom_example_agent", - "description": "examples.model_providers.custom_example_agent", - "peekOfCode": "API_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for a specific agent. Steps:\n1. Create a custom OpenAI client.\n2. Create a `Model` that uses the custom client.\n3. 
Set the `model` on the Agent.", - "detail": "examples.model_providers.custom_example_agent", - "documentation": {} - }, - { - "label": "MODEL_NAME", - "kind": 5, - "importPath": "examples.model_providers.custom_example_agent", - "description": "examples.model_providers.custom_example_agent", - "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for a specific agent. Steps:\n1. Create a custom OpenAI client.\n2. Create a `Model` that uses the custom client.\n3. Set the `model` on the Agent.\nNote that in this example, we disable tracing under the assumption that you don't have an API key", - "detail": "examples.model_providers.custom_example_agent", - "documentation": {} - }, - { - "label": "client", - "kind": 5, - "importPath": "examples.model_providers.custom_example_agent", - "description": "examples.model_providers.custom_example_agent", - "peekOfCode": "client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)\nset_tracing_disabled(disabled=True)\n# An alternate approach that would also work:\n# PROVIDER = OpenAIProvider(openai_client=client)\n# agent = Agent(..., model=\"some-custom-model\")\n# Runner.run(agent, ..., run_config=RunConfig(model_provider=PROVIDER))\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"", - "detail": "examples.model_providers.custom_example_agent", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.model_providers.custom_example_global", - "description": "examples.model_providers.custom_example_global", - "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n model=MODEL_NAME,\n tools=[get_weather],\n )", - "detail": "examples.model_providers.custom_example_global", - "documentation": {} - }, - { - "label": "BASE_URL", - "kind": 5, - "importPath": "examples.model_providers.custom_example_global", - "description": "examples.model_providers.custom_example_global", - "peekOfCode": "BASE_URL = os.getenv(\"EXAMPLE_BASE_URL\") or \"\"\nAPI_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for all requests by default. We do three things:\n1. Create a custom client.\n2. Set it as the default OpenAI client, and don't use it for tracing.", - "detail": "examples.model_providers.custom_example_global", - "documentation": {} - }, - { - "label": "API_KEY", - "kind": 5, - "importPath": "examples.model_providers.custom_example_global", - "description": "examples.model_providers.custom_example_global", - "peekOfCode": "API_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for all requests by default. 
We do three things:\n1. Create a custom client.\n2. Set it as the default OpenAI client, and don't use it for tracing.\n3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API.", - "detail": "examples.model_providers.custom_example_global", - "documentation": {} - }, - { - "label": "MODEL_NAME", - "kind": 5, - "importPath": "examples.model_providers.custom_example_global", - "description": "examples.model_providers.custom_example_global", - "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for all requests by default. We do three things:\n1. Create a custom client.\n2. Set it as the default OpenAI client, and don't use it for tracing.\n3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API.\nNote that in this example, we disable tracing under the assumption that you don't have an API key", - "detail": "examples.model_providers.custom_example_global", - "documentation": {} - }, - { - "label": "client", - "kind": 5, - "importPath": "examples.model_providers.custom_example_global", - "description": "examples.model_providers.custom_example_global", - "peekOfCode": "client = AsyncOpenAI(\n base_url=BASE_URL,\n api_key=API_KEY,\n)\nset_default_openai_client(client=client, use_for_tracing=False)\nset_default_openai_api(\"chat_completions\")\nset_tracing_disabled(disabled=True)\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")", - "detail": "examples.model_providers.custom_example_global", - "documentation": {} - }, - { - "label": "CustomModelProvider", - "kind": 6, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "class CustomModelProvider(ModelProvider):\n def get_model(self, model_name: str | None) -> Model:\n return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client)\nCUSTOM_MODEL_PROVIDER = CustomModelProvider()\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(name=\"Assistant\", instructions=\"You only respond in haikus.\", tools=[get_weather])", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(name=\"Assistant\", instructions=\"You only respond in haikus.\", tools=[get_weather])\n # This will use the custom model provider\n result = await Runner.run(\n agent,\n \"What's the weather in Tokyo?\",\n run_config=RunConfig(model_provider=CUSTOM_MODEL_PROVIDER),", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "BASE_URL", - "kind": 5, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "BASE_URL = os.getenv(\"EXAMPLE_BASE_URL\") or \"\"\nAPI_KEY 
= os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for\nothers. Steps:\n1. Create a custom OpenAI client.", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "API_KEY", - "kind": 5, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "API_KEY = os.getenv(\"EXAMPLE_API_KEY\") or \"\"\nMODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for\nothers. Steps:\n1. Create a custom OpenAI client.\n2. Create a ModelProvider that uses the custom client.", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "MODEL_NAME", - "kind": 5, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"\"\nif not BASE_URL or not API_KEY or not MODEL_NAME:\n raise ValueError(\n \"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code.\"\n )\n\"\"\"This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for\nothers. Steps:\n1. Create a custom OpenAI client.\n2. Create a ModelProvider that uses the custom client.\n3. 
Use the ModelProvider in calls to Runner.run(), only when we want to use the custom LLM provider.", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "client", - "kind": 5, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)\nset_tracing_disabled(disabled=True)\nclass CustomModelProvider(ModelProvider):\n def get_model(self, model_name: str | None) -> Model:\n return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client)\nCUSTOM_MODEL_PROVIDER = CustomModelProvider()\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "CUSTOM_MODEL_PROVIDER", - "kind": 5, - "importPath": "examples.model_providers.custom_example_provider", - "description": "examples.model_providers.custom_example_provider", - "peekOfCode": "CUSTOM_MODEL_PROVIDER = CustomModelProvider()\n@function_tool\ndef get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(name=\"Assistant\", instructions=\"You only respond in haikus.\", tools=[get_weather])\n # This will use the custom model provider\n result = await Runner.run(\n agent,", - "detail": "examples.model_providers.custom_example_provider", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.model_providers.litellm_auto", - "description": "examples.model_providers.litellm_auto", - "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main():\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n # We prefix with litellm/ to tell the Runner to use the LitellmModel\n model=\"litellm/anthropic/claude-3-5-sonnet-20240620\",\n tools=[get_weather],", - "detail": "examples.model_providers.litellm_auto", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.model_providers.litellm_provider", - "description": "examples.model_providers.litellm_provider", - "peekOfCode": "def get_weather(city: str):\n print(f\"[debug] getting weather for {city}\")\n return f\"The weather in {city} is sunny.\"\nasync def main(model: str, api_key: str):\n agent = Agent(\n name=\"Assistant\",\n instructions=\"You only respond in haikus.\",\n model=LitellmModel(model=model, api_key=api_key),\n tools=[get_weather],\n )", - "detail": "examples.model_providers.litellm_provider", - "documentation": {} - }, - { - "label": "Example", - "kind": 6, - "importPath": "examples.realtime.demo", - "description": "examples.realtime.demo", - "peekOfCode": "class Example:\n def __init__(self) -> None:\n self.ui = AppUI()\n self.ui.connected = asyncio.Event()\n self.ui.last_audio_item_id = None\n # Set the audio callback\n self.ui.set_audio_callback(self.on_audio_recorded)\n self.session: RealtimeSession | None = None\n async def run(self) -> None:\n # Start UI in a separate task instead of waiting for it to complete", - "detail": "examples.realtime.demo", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.realtime.demo", - "description": 
"examples.realtime.demo", - "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"\n return f\"The weather in {city} is sunny.\"\nagent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:", - "detail": "examples.realtime.demo", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.realtime.demo", - "description": "examples.realtime.demo", - "peekOfCode": "agent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:\n return s[:max_length] + \"...\"\n return s\nclass Example:", - "detail": "examples.realtime.demo", - "documentation": {} - }, - { - "label": "NoUIDemo", - "kind": 6, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "class NoUIDemo:\n def __init__(self) -> None:\n self.session: RealtimeSession | None = None\n self.audio_stream: sd.InputStream | None = None\n self.audio_player: sd.OutputStream | None = None\n self.recording = False\n async def run(self) -> None:\n print(\"Connecting, may take a few seconds...\")\n # Initialize audio player\n self.audio_player = sd.OutputStream(", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"\n return f\"The weather in {city} is sunny.\"\nagent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "CHUNK_LENGTH_S", - "kind": 5, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "CHUNK_LENGTH_S = 0.05 # 50ms\nSAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# logger.logger.setLevel(logging.ERROR)\n@function_tool", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "SAMPLE_RATE", - "kind": 5, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "SAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# logger.logger.setLevel(logging.ERROR)\n@function_tool\ndef get_weather(city: str) -> str:", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "FORMAT", - "kind": 5, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "FORMAT = np.int16\nCHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# 
logger.logger.setLevel(logging.ERROR)\n@function_tool\ndef get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "CHANNELS", - "kind": 5, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "CHANNELS = 1\n# Set up logging for OpenAI agents SDK\n# logging.basicConfig(\n# level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n# )\n# logger.logger.setLevel(logging.ERROR)\n@function_tool\ndef get_weather(city: str) -> str:\n \"\"\"Get the weather in a city.\"\"\"\n return f\"The weather in {city} is sunny.\"", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.realtime.no_ui_demo", - "description": "examples.realtime.no_ui_demo", - "peekOfCode": "agent = RealtimeAgent(\n name=\"Assistant\",\n instructions=\"You always greet the user with 'Top of the morning to you'.\",\n tools=[get_weather],\n)\ndef _truncate_str(s: str, max_length: int) -> str:\n if len(s) > max_length:\n return s[:max_length] + \"...\"\n return s\nclass NoUIDemo:", - "detail": "examples.realtime.no_ui_demo", - "documentation": {} - }, - { - "label": "Header", - "kind": 6, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "class Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override\n def render(self) -> str:", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "AudioStatusIndicator", - "kind": 6, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "class AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override\n def render(self) -> str:\n status = (\n \"🔴 Conversation started.\"\n if self.is_recording\n else \"⚪ Press SPACE to start the conversation (q to quit)\"\n )", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "AppUI", - "kind": 6, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "class AppUI(App[None]):\n CSS = \"\"\"\n Screen {\n background: #1a1b26; /* Dark blue-grey background */\n }\n Container {\n border: double rgb(91, 164, 91);\n }\n #input-container {\n height: 5; /* Explicit height for input container */", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "CHUNK_LENGTH_S", - "kind": 5, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "CHUNK_LENGTH_S = 0.05 # 50ms\nSAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "SAMPLE_RATE", - "kind": 5, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "SAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass 
AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "FORMAT", - "kind": 5, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "FORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "CHANNELS", - "kind": 5, - "importPath": "examples.realtime.ui", - "description": "examples.realtime.ui", - "peekOfCode": "CHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n @override\n def render(self) -> str:\n return \"Realtime Demo\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override", - "detail": "examples.realtime.ui", - "documentation": {} - }, - { - "label": "MODEL_NAME", - "kind": 5, - "importPath": "examples.reasoning_content.main", - "description": "examples.reasoning_content.main", - "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"deepseek-reasoner\"\nasync def stream_with_reasoning_content():\n \"\"\"\n Example of streaming a response from a model that provides reasoning content.\n The reasoning content will be emitted as separate events.\n \"\"\"\n provider = OpenAIProvider()\n model = provider.get_model(MODEL_NAME)\n print(\"\\n=== Streaming Example ===\")\n print(\"Prompt: Write a haiku about recursion in programming\")", - "detail": "examples.reasoning_content.main", - "documentation": {} - }, - { - "label": "MODEL_NAME", - "kind": 5, - "importPath": "examples.reasoning_content.runner_example", - "description": "examples.reasoning_content.runner_example", - "peekOfCode": "MODEL_NAME = os.getenv(\"EXAMPLE_MODEL_NAME\") or \"deepseek-reasoner\"\nasync def main():\n print(f\"Using model: {MODEL_NAME}\")\n # Create an agent with a model that supports reasoning content\n agent = Agent(\n name=\"Reasoning Agent\",\n instructions=\"You are a helpful assistant that explains your reasoning step by step.\",\n model=MODEL_NAME,\n )\n # Example 1: Non-streaming response", - "detail": "examples.reasoning_content.runner_example", - "documentation": {} - }, - { - "label": "WebSearchItem", - "kind": 6, - "importPath": "examples.research_bot.agents.planner_agent", - "description": "examples.research_bot.agents.planner_agent", - "peekOfCode": "class WebSearchItem(BaseModel):\n reason: str\n \"Your reasoning for why this search is important to the query.\"\n query: str\n \"The search term to use for the web search.\"\nclass WebSearchPlan(BaseModel):\n searches: list[WebSearchItem]\n \"\"\"A list of web searches to perform to best answer the query.\"\"\"\nplanner_agent = Agent(\n name=\"PlannerAgent\",", - "detail": "examples.research_bot.agents.planner_agent", - "documentation": {} - }, - { - "label": "WebSearchPlan", - "kind": 6, - "importPath": "examples.research_bot.agents.planner_agent", - "description": "examples.research_bot.agents.planner_agent", - "peekOfCode": "class WebSearchPlan(BaseModel):\n searches: list[WebSearchItem]\n \"\"\"A list of web searches to perform to best answer the query.\"\"\"\nplanner_agent = Agent(\n name=\"PlannerAgent\",\n instructions=PROMPT,\n 
model=\"gpt-4o\",\n output_type=WebSearchPlan,\n)", - "detail": "examples.research_bot.agents.planner_agent", - "documentation": {} - }, - { - "label": "PROMPT", - "kind": 5, - "importPath": "examples.research_bot.agents.planner_agent", - "description": "examples.research_bot.agents.planner_agent", - "peekOfCode": "PROMPT = (\n \"You are a helpful research assistant. Given a query, come up with a set of web searches \"\n \"to perform to best answer the query. Output between 5 and 20 terms to query for.\"\n)\nclass WebSearchItem(BaseModel):\n reason: str\n \"Your reasoning for why this search is important to the query.\"\n query: str\n \"The search term to use for the web search.\"\nclass WebSearchPlan(BaseModel):", - "detail": "examples.research_bot.agents.planner_agent", - "documentation": {} - }, - { - "label": "planner_agent", - "kind": 5, - "importPath": "examples.research_bot.agents.planner_agent", - "description": "examples.research_bot.agents.planner_agent", - "peekOfCode": "planner_agent = Agent(\n name=\"PlannerAgent\",\n instructions=PROMPT,\n model=\"gpt-4o\",\n output_type=WebSearchPlan,\n)", - "detail": "examples.research_bot.agents.planner_agent", - "documentation": {} - }, - { - "label": "INSTRUCTIONS", - "kind": 5, - "importPath": "examples.research_bot.agents.search_agent", - "description": "examples.research_bot.agents.search_agent", - "peekOfCode": "INSTRUCTIONS = (\n \"You are a research assistant. Given a search term, you search the web for that term and \"\n \"produce a concise summary of the results. The summary must be 2-3 paragraphs and less than 300 \"\n \"words. Capture the main points. Write succinctly, no need to have complete sentences or good \"\n \"grammar. This will be consumed by someone synthesizing a report, so its vital you capture the \"\n \"essence and ignore any fluff. Do not include any additional commentary other than the summary \"\n \"itself.\"\n)\nsearch_agent = Agent(\n name=\"Search agent\",", - "detail": "examples.research_bot.agents.search_agent", - "documentation": {} - }, - { - "label": "search_agent", - "kind": 5, - "importPath": "examples.research_bot.agents.search_agent", - "description": "examples.research_bot.agents.search_agent", - "peekOfCode": "search_agent = Agent(\n name=\"Search agent\",\n instructions=INSTRUCTIONS,\n tools=[WebSearchTool()],\n model_settings=ModelSettings(tool_choice=\"required\"),\n)", - "detail": "examples.research_bot.agents.search_agent", - "documentation": {} - }, - { - "label": "ReportData", - "kind": 6, - "importPath": "examples.research_bot.agents.writer_agent", - "description": "examples.research_bot.agents.writer_agent", - "peekOfCode": "class ReportData(BaseModel):\n short_summary: str\n \"\"\"A short 2-3 sentence summary of the findings.\"\"\"\n markdown_report: str\n \"\"\"The final report\"\"\"\n follow_up_questions: list[str]\n \"\"\"Suggested topics to research further\"\"\"\nwriter_agent = Agent(\n name=\"WriterAgent\",\n instructions=PROMPT,", - "detail": "examples.research_bot.agents.writer_agent", - "documentation": {} - }, - { - "label": "PROMPT", - "kind": 5, - "importPath": "examples.research_bot.agents.writer_agent", - "description": "examples.research_bot.agents.writer_agent", - "peekOfCode": "PROMPT = (\n \"You are a senior researcher tasked with writing a cohesive report for a research query. 
\"\n \"You will be provided with the original query, and some initial research done by a research \"\n \"assistant.\\n\"\n \"You should first come up with an outline for the report that describes the structure and \"\n \"flow of the report. Then, generate the report and return that as your final output.\\n\"\n \"The final output should be in markdown format, and it should be lengthy and detailed. Aim \"\n \"for 5-10 pages of content, at least 1000 words.\"\n)\nclass ReportData(BaseModel):", - "detail": "examples.research_bot.agents.writer_agent", - "documentation": {} - }, - { - "label": "writer_agent", - "kind": 5, - "importPath": "examples.research_bot.agents.writer_agent", - "description": "examples.research_bot.agents.writer_agent", - "peekOfCode": "writer_agent = Agent(\n name=\"WriterAgent\",\n instructions=PROMPT,\n model=\"o3-mini\",\n output_type=ReportData,\n)", - "detail": "examples.research_bot.agents.writer_agent", - "documentation": {} - }, - { - "label": "ResearchManager", - "kind": 6, - "importPath": "examples.research_bot.manager", - "description": "examples.research_bot.manager", - "peekOfCode": "class ResearchManager:\n def __init__(self):\n self.console = Console()\n self.printer = Printer(self.console)\n async def run(self, query: str) -> None:\n trace_id = gen_trace_id()\n with trace(\"Research trace\", trace_id=trace_id):\n self.printer.update_item(\n \"trace_id\",\n f\"View trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\",", - "detail": "examples.research_bot.manager", - "documentation": {} - }, - { - "label": "Printer", - "kind": 6, - "importPath": "examples.research_bot.printer", - "description": "examples.research_bot.printer", - "peekOfCode": "class Printer:\n def __init__(self, console: Console):\n self.live = Live(console=console)\n self.items: dict[str, tuple[str, bool]] = {}\n self.hide_done_ids: set[str] = set()\n self.live.start()\n def end(self) -> None:\n self.live.stop()\n def hide_done_checkmark(self, item_id: str) -> None:\n self.hide_done_ids.add(item_id)", - "detail": "examples.research_bot.printer", - "documentation": {} - }, - { - "label": "LocalPlaywrightComputer", - "kind": 6, - "importPath": "examples.tools.computer_use", - "description": "examples.tools.computer_use", - "peekOfCode": "class LocalPlaywrightComputer(AsyncComputer):\n \"\"\"A computer, implemented using a local Playwright browser.\"\"\"\n def __init__(self):\n self._playwright: Union[Playwright, None] = None\n self._browser: Union[Browser, None] = None\n self._page: Union[Page, None] = None\n async def _get_browser_and_page(self) -> tuple[Browser, Page]:\n width, height = self.dimensions\n launch_args = [f\"--window-size={width},{height}\"]\n browser = await self.playwright.chromium.launch(headless=False, args=launch_args)", - "detail": "examples.tools.computer_use", - "documentation": {} - }, - { - "label": "CUA_KEY_TO_PLAYWRIGHT_KEY", - "kind": 5, - "importPath": "examples.tools.computer_use", - "description": "examples.tools.computer_use", - "peekOfCode": "CUA_KEY_TO_PLAYWRIGHT_KEY = {\n \"/\": \"Divide\",\n \"\\\\\": \"Backslash\",\n \"alt\": \"Alt\",\n \"arrowdown\": \"ArrowDown\",\n \"arrowleft\": \"ArrowLeft\",\n \"arrowright\": \"ArrowRight\",\n \"arrowup\": \"ArrowUp\",\n \"backspace\": \"Backspace\",\n \"capslock\": \"CapsLock\",", - "detail": "examples.tools.computer_use", - "documentation": {} - }, - { - "label": "open_file", - "kind": 2, - "importPath": "examples.tools.image_generator", - "description": "examples.tools.image_generator", - 
"peekOfCode": "def open_file(path: str) -> None:\n if sys.platform.startswith(\"darwin\"):\n subprocess.run([\"open\", path], check=False) # macOS\n elif os.name == \"nt\": # Windows\n os.astartfile(path) # type: ignore\n elif os.name == \"posix\":\n subprocess.run([\"xdg-open\", path], check=False) # Linux/Unix\n else:\n print(f\"Don't know how to open files on this platform: {sys.platform}\")\nasync def main():", - "detail": "examples.tools.image_generator", - "documentation": {} - }, - { - "label": "WorkflowCallbacks", - "kind": 6, - "importPath": "examples.voice.static.main", - "description": "examples.voice.static.main", - "peekOfCode": "class WorkflowCallbacks(SingleAgentWorkflowCallbacks):\n def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:\n print(f\"[debug] on_run called with transcription: {transcription}\")\nasync def main():\n pipeline = VoicePipeline(\n workflow=SingleAgentVoiceWorkflow(agent, callbacks=WorkflowCallbacks())\n )\n audio_input = AudioInput(buffer=record_audio())\n result = await pipeline.run(audio_input)\n with AudioPlayer() as player:", - "detail": "examples.voice.static.main", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.voice.static.main", - "description": "examples.voice.static.main", - "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather for a given city.\"\"\"\n print(f\"[debug] get_weather called with city: {city}\")\n choices = [\"sunny\", \"cloudy\", \"rainy\", \"snowy\"]\n return f\"The weather in {city} is {random.choice(choices)}.\"\nspanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. Speak in Spanish.\",", - "detail": "examples.voice.static.main", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.voice.static.main", - "description": "examples.voice.static.main", - "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. Speak in Spanish.\",\n ),\n model=\"gpt-4o-mini\",\n)\nagent = Agent(\n name=\"Assistant\",", - "detail": "examples.voice.static.main", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.voice.static.main", - "description": "examples.voice.static.main", - "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. 
If the user speaks in Spanish, handoff to the spanish agent.\",\n ),\n model=\"gpt-4o-mini\",\n handoffs=[spanish_agent],\n tools=[get_weather],\n)\nclass WorkflowCallbacks(SingleAgentWorkflowCallbacks):", - "detail": "examples.voice.static.main", - "documentation": {} - }, - { - "label": "AudioPlayer", - "kind": 6, - "importPath": "examples.voice.static.util", - "description": "examples.voice.static.util", - "peekOfCode": "class AudioPlayer:\n def __enter__(self):\n self.stream = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)\n self.stream.start()\n return self\n def __exit__(self, exc_type, exc_value, traceback):\n self.stream.stop() # wait for the stream to finish\n self.stream.close()\n def add_audio(self, audio_data: npt.NDArray[np.int16]):\n self.stream.write(audio_data)", - "detail": "examples.voice.static.util", - "documentation": {} - }, - { - "label": "record_audio", - "kind": 2, - "importPath": "examples.voice.static.util", - "description": "examples.voice.static.util", - "peekOfCode": "def record_audio():\n # Using curses to record audio in a way that:\n # - doesn't require accessibility permissions on macos\n # - doesn't block the terminal\n audio_data = curses.wrapper(_record_audio)\n return audio_data\nclass AudioPlayer:\n def __enter__(self):\n self.stream = sd.OutputStream(samplerate=24000, channels=1, dtype=np.int16)\n self.stream.start()", - "detail": "examples.voice.static.util", - "documentation": {} - }, - { - "label": "Header", - "kind": 6, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "class Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "AudioStatusIndicator", - "kind": 6, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "class AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)\n @override\n def render(self) -> str:\n status = (\n \"🔴 Recording... (Press K to stop)\"\n if self.is_recording\n else \"⚪ Press K to start recording (Q to quit)\"\n )", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "RealtimeApp", - "kind": 6, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "class RealtimeApp(App[None]):\n CSS = \"\"\"\n Screen {\n background: #1a1b26; /* Dark blue-grey background */\n }\n Container {\n border: double rgb(91, 164, 91);\n }\n Horizontal {\n width: 100%;", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "CHUNK_LENGTH_S", - "kind": 5, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "CHUNK_LENGTH_S = 0.05 # 50ms\nSAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent.
When you stop speaking, it will respond.\"", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "SAMPLE_RATE", - "kind": 5, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "SAMPLE_RATE = 24000\nFORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "FORMAT", - "kind": 5, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "FORMAT = np.int16\nCHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "CHANNELS", - "kind": 5, - "importPath": "examples.voice.streamed.main", - "description": "examples.voice.streamed.main", - "peekOfCode": "CHANNELS = 1\nclass Header(Static):\n \"\"\"A header widget.\"\"\"\n session_id = reactive(\"\")\n @override\n def render(self) -> str:\n return \"Speak to the agent. When you stop speaking, it will respond.\"\nclass AudioStatusIndicator(Static):\n \"\"\"A widget that shows the current audio recording status.\"\"\"\n is_recording = reactive(False)", - "detail": "examples.voice.streamed.main", - "documentation": {} - }, - { - "label": "MyWorkflow", - "kind": 6, - "importPath": "examples.voice.streamed.my_workflow", - "description": "examples.voice.streamed.my_workflow", - "peekOfCode": "class MyWorkflow(VoiceWorkflowBase):\n def __init__(self, secret_word: str, on_start: Callable[[str], None]):\n \"\"\"\n Args:\n secret_word: The secret word to guess.\n on_start: A callback that is called when the workflow starts. The transcription\n is passed in as an argument.\n \"\"\"\n self._input_history: list[TResponseInputItem] = []\n self._current_agent = agent", - "detail": "examples.voice.streamed.my_workflow", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "examples.voice.streamed.my_workflow", - "description": "examples.voice.streamed.my_workflow", - "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather for a given city.\"\"\"\n print(f\"[debug] get_weather called with city: {city}\")\n choices = [\"sunny\", \"cloudy\", \"rainy\", \"snowy\"]\n return f\"The weather in {city} is {random.choice(choices)}.\"\nspanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. Speak in Spanish.\",", - "detail": "examples.voice.streamed.my_workflow", - "documentation": {} - }, - { - "label": "spanish_agent", - "kind": 5, - "importPath": "examples.voice.streamed.my_workflow", - "description": "examples.voice.streamed.my_workflow", - "peekOfCode": "spanish_agent = Agent(\n name=\"Spanish\",\n handoff_description=\"A spanish speaking agent.\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. 
Speak in Spanish.\",\n ),\n model=\"gpt-4o-mini\",\n)\nagent = Agent(\n name=\"Assistant\",", - "detail": "examples.voice.streamed.my_workflow", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "examples.voice.streamed.my_workflow", - "description": "examples.voice.streamed.my_workflow", - "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=prompt_with_handoff_instructions(\n \"You're speaking to a human, so be polite and concise. If the user speaks in Spanish, handoff to the spanish agent.\",\n ),\n model=\"gpt-4o-mini\",\n handoffs=[spanish_agent],\n tools=[get_weather],\n)\nclass MyWorkflow(VoiceWorkflowBase):", - "detail": "examples.voice.streamed.my_workflow", - "documentation": {} - }, - { - "label": "LitellmModel", - "kind": 6, - "importPath": "src.agents.extensions.models.litellm_model", - "description": "src.agents.extensions.models.litellm_model", - "peekOfCode": "class LitellmModel(Model):\n \"\"\"This class enables using any model via LiteLLM. LiteLLM allows you to acess OpenAPI,\n Anthropic, Gemini, Mistral, and many other models.\n See supported models here: [litellm models](https://docs.litellm.ai/docs/providers).\n \"\"\"\n def __init__(\n self,\n model: str,\n base_url: str | None = None,\n api_key: str | None = None,", - "detail": "src.agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "LitellmConverter", - "kind": 6, - "importPath": "src.agents.extensions.models.litellm_model", - "description": "src.agents.extensions.models.litellm_model", - "peekOfCode": "class LitellmConverter:\n @classmethod\n def convert_message_to_openai(\n cls, message: litellm.types.utils.Message\n ) -> ChatCompletionMessage:\n if message.role != \"assistant\":\n raise ModelBehaviorError(f\"Unsupported role: {message.role}\")\n tool_calls = (\n [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]\n if message.tool_calls", - "detail": "src.agents.extensions.models.litellm_model", - "documentation": {} - }, - { - "label": "LitellmProvider", - "kind": 6, - "importPath": "src.agents.extensions.models.litellm_provider", - "description": "src.agents.extensions.models.litellm_provider", - "peekOfCode": "class LitellmProvider(ModelProvider):\n \"\"\"A ModelProvider that uses LiteLLM to route to any model provider. You can use it via:\n ```python\n Runner.run(agent, input, run_config=RunConfig(model_provider=LitellmProvider()))\n ```\n See supported models here: [litellm models](https://docs.litellm.ai/docs/providers).\n NOTE: API keys must be set via environment variables. If you're using models that require\n additional configuration (e.g. Azure API base or version), those must also be set via the\n environment variables that LiteLLM expects. 
If you have more advanced needs, we recommend\n copy-pasting this class and making any modifications you need.", - "detail": "src.agents.extensions.models.litellm_provider", - "documentation": {} - }, - { - "label": "remove_all_tools", - "kind": 2, - "importPath": "src.agents.extensions.handoff_filters", - "description": "src.agents.extensions.handoff_filters", - "peekOfCode": "def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData:\n \"\"\"Filters out all tool items: file search, web search and function calls+output.\"\"\"\n history = handoff_input_data.input_history\n new_items = handoff_input_data.new_items\n filtered_history = (\n _remove_tool_types_from_input(history) if isinstance(history, tuple) else history\n )\n filtered_pre_handoff_items = _remove_tools_from_items(handoff_input_data.pre_handoff_items)\n filtered_new_items = _remove_tools_from_items(new_items)\n return HandoffInputData(", - "detail": "src.agents.extensions.handoff_filters", - "documentation": {} - }, - { - "label": "prompt_with_handoff_instructions", - "kind": 2, - "importPath": "src.agents.extensions.handoff_prompt", - "description": "src.agents.extensions.handoff_prompt", - "peekOfCode": "def prompt_with_handoff_instructions(prompt: str) -> str:\n \"\"\"\n Add recommended instructions to the prompt for agents that use handoffs.\n \"\"\"\n return f\"{RECOMMENDED_PROMPT_PREFIX}\\n\\n{prompt}\"", - "detail": "src.agents.extensions.handoff_prompt", - "documentation": {} - }, - { - "label": "RECOMMENDED_PROMPT_PREFIX", - "kind": 5, - "importPath": "src.agents.extensions.handoff_prompt", - "description": "src.agents.extensions.handoff_prompt", - "peekOfCode": "RECOMMENDED_PROMPT_PREFIX = (\n \"# System context\\n\"\n \"You are part of a multi-agent system called the Agents SDK, designed to make agent \"\n \"coordination and execution easy. Agents uses two primary abstraction: **Agents** and \"\n \"**Handoffs**. An agent encompasses instructions and tools and can hand off a \"\n \"conversation to another agent when appropriate. \"\n \"Handoffs are achieved by calling a handoff function, generally named \"\n \"`transfer_to_`. 
Transfers between agents are handled seamlessly in the background;\"\n \" do not mention or draw attention to these transfers in your conversation with the user.\\n\"\n)", - "detail": "src.agents.extensions.handoff_prompt", - "documentation": {} - }, - { - "label": "get_main_graph", - "kind": 2, - "importPath": "src.agents.extensions.visualization", - "description": "src.agents.extensions.visualization", - "peekOfCode": "def get_main_graph(agent: Agent) -> str:\n \"\"\"\n Generates the main graph structure in DOT format for the given agent.\n Args:\n agent (Agent): The agent for which the graph is to be generated.\n Returns:\n str: The DOT format string representing the graph.\n \"\"\"\n parts = [\n \"\"\"", - "detail": "src.agents.extensions.visualization", - "documentation": {} - }, - { - "label": "get_all_nodes", - "kind": 2, - "importPath": "src.agents.extensions.visualization", - "description": "src.agents.extensions.visualization", - "peekOfCode": "def get_all_nodes(\n agent: Agent, parent: Agent | None = None, visited: set[str] | None = None\n) -> str:\n \"\"\"\n Recursively generates the nodes for the given agent and its handoffs in DOT format.\n Args:\n agent (Agent): The agent for which the nodes are to be generated.\n Returns:\n str: The DOT format string representing the nodes.\n \"\"\"", - "detail": "src.agents.extensions.visualization", - "documentation": {} - }, - { - "label": "get_all_edges", - "kind": 2, - "importPath": "src.agents.extensions.visualization", - "description": "src.agents.extensions.visualization", - "peekOfCode": "def get_all_edges(\n agent: Agent, parent: Agent | None = None, visited: set[str] | None = None\n) -> str:\n \"\"\"\n Recursively generates the edges for the given agent and its handoffs in DOT format.\n Args:\n agent (Agent): The agent for which the edges are to be generated.\n parent (Agent, optional): The parent agent. Defaults to None.\n Returns:\n str: The DOT format string representing the edges.", - "detail": "src.agents.extensions.visualization", - "documentation": {} - }, - { - "label": "draw_graph", - "kind": 2, - "importPath": "src.agents.extensions.visualization", - "description": "src.agents.extensions.visualization", - "peekOfCode": "def draw_graph(agent: Agent, filename: str | None = None) -> graphviz.Source:\n \"\"\"\n Draws the graph for the given agent and optionally saves it as a PNG file.\n Args:\n agent (Agent): The agent for which the graph is to be drawn.\n filename (str): The name of the file to save the graph as a PNG.\n Returns:\n graphviz.Source: The graphviz Source object representing the graph.\n \"\"\"\n dot_code = get_main_graph(agent)", - "detail": "src.agents.extensions.visualization", - "documentation": {} - }, - { - "label": "MCPServer", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServer(abc.ABC):\n \"\"\"Base class for Model Context Protocol servers.\"\"\"\n @abc.abstractmethod\n async def connect(self):\n \"\"\"Connect to the server. For example, this might mean spawning a subprocess or\n opening a network connection. 
The server is expected to remain connected until\n `cleanup()` is called.\n \"\"\"\n pass\n @property", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "_MCPServerWithClientSession", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class _MCPServerWithClientSession(MCPServer, abc.ABC):\n \"\"\"Base class for MCP servers that use a `ClientSession` to communicate with the server.\"\"\"\n def __init__(\n self,\n cache_tools_list: bool,\n client_session_timeout_seconds: float | None,\n tool_filter: ToolFilter = None,\n ):\n \"\"\"\n Args:", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "MCPServerStdioParams", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServerStdioParams(TypedDict):\n \"\"\"Mirrors `mcp.client.stdio.StdioServerParameters`, but lets you pass params without another\n import.\n \"\"\"\n command: str\n \"\"\"The executable to run to start the server. For example, `python` or `node`.\"\"\"\n args: NotRequired[list[str]]\n \"\"\"Command line args to pass to the `command` executable. For example, `['foo.py']` or\n `['server.js', '--port', '8080']`.\"\"\"\n env: NotRequired[dict[str, str]]", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "MCPServerStdio", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServerStdio(_MCPServerWithClientSession):\n \"\"\"MCP server implementation that uses the stdio transport. See the [spec]\n (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio) for\n details.\n \"\"\"\n def __init__(\n self,\n params: MCPServerStdioParams,\n cache_tools_list: bool = False,\n name: str | None = None,", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "MCPServerSseParams", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServerSseParams(TypedDict):\n \"\"\"Mirrors the params in`mcp.client.sse.sse_client`.\"\"\"\n url: str\n \"\"\"The URL of the server.\"\"\"\n headers: NotRequired[dict[str, str]]\n \"\"\"The headers to send to the server.\"\"\"\n timeout: NotRequired[float]\n \"\"\"The timeout for the HTTP request. Defaults to 5 seconds.\"\"\"\n sse_read_timeout: NotRequired[float]\n \"\"\"The timeout for the SSE connection, in seconds. Defaults to 5 minutes.\"\"\"", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "MCPServerSse", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServerSse(_MCPServerWithClientSession):\n \"\"\"MCP server implementation that uses the HTTP with SSE transport. 
See the [spec]\n (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse)\n for details.\n \"\"\"\n def __init__(\n self,\n params: MCPServerSseParams,\n cache_tools_list: bool = False,\n name: str | None = None,", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "MCPServerStreamableHttpParams", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServerStreamableHttpParams(TypedDict):\n \"\"\"Mirrors the params in`mcp.client.streamable_http.streamablehttp_client`.\"\"\"\n url: str\n \"\"\"The URL of the server.\"\"\"\n headers: NotRequired[dict[str, str]]\n \"\"\"The headers to send to the server.\"\"\"\n timeout: NotRequired[timedelta | float]\n \"\"\"The timeout for the HTTP request. Defaults to 5 seconds.\"\"\"\n sse_read_timeout: NotRequired[timedelta | float]\n \"\"\"The timeout for the SSE connection, in seconds. Defaults to 5 minutes.\"\"\"", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "MCPServerStreamableHttp", - "kind": 6, - "importPath": "src.agents.mcp.server", - "description": "src.agents.mcp.server", - "peekOfCode": "class MCPServerStreamableHttp(_MCPServerWithClientSession):\n \"\"\"MCP server implementation that uses the Streamable HTTP transport. See the [spec]\n (https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http)\n for details.\n \"\"\"\n def __init__(\n self,\n params: MCPServerStreamableHttpParams,\n cache_tools_list: bool = False,\n name: str | None = None,", - "detail": "src.agents.mcp.server", - "documentation": {} - }, - { - "label": "ToolFilterContext", - "kind": 6, - "importPath": "src.agents.mcp.util", - "description": "src.agents.mcp.util", - "peekOfCode": "class ToolFilterContext:\n \"\"\"Context information available to tool filter functions.\"\"\"\n run_context: RunContextWrapper[Any]\n \"\"\"The current run context.\"\"\"\n agent: \"AgentBase\"\n \"\"\"The agent that is requesting the tool list.\"\"\"\n server_name: str\n \"\"\"The name of the MCP server.\"\"\"\nToolFilterCallable = Callable[[\"ToolFilterContext\", \"MCPTool\"], MaybeAwaitable[bool]]\n\"\"\"A function that determines whether a tool should be available.", - "detail": "src.agents.mcp.util", - "documentation": {} - }, - { - "label": "ToolFilterStatic", - "kind": 6, - "importPath": "src.agents.mcp.util", - "description": "src.agents.mcp.util", - "peekOfCode": "class ToolFilterStatic(TypedDict):\n \"\"\"Static tool filter configuration using allowlists and blocklists.\"\"\"\n allowed_tool_names: NotRequired[list[str]]\n \"\"\"Optional list of tool names to allow (whitelist).\n If set, only these tools will be available.\"\"\"\n blocked_tool_names: NotRequired[list[str]]\n \"\"\"Optional list of tool names to exclude (blacklist).\n If set, these tools will be filtered out.\"\"\"\nToolFilter = Union[ToolFilterCallable, ToolFilterStatic, None]\n\"\"\"A tool filter that can be either a function, static configuration, or None (no filtering).\"\"\"", - "detail": "src.agents.mcp.util", - "documentation": {} - }, - { - "label": "MCPUtil", - "kind": 6, - "importPath": "src.agents.mcp.util", - "description": "src.agents.mcp.util", - "peekOfCode": "class MCPUtil:\n \"\"\"Set of utilities for interop between MCP and Agents SDK tools.\"\"\"\n @classmethod\n async def get_all_function_tools(\n cls,\n servers: list[\"MCPServer\"],\n convert_schemas_to_strict: bool,\n run_context: 
RunContextWrapper[Any],\n agent: \"AgentBase\",\n ) -> list[Tool]:", - "detail": "src.agents.mcp.util", - "documentation": {} - }, - { - "label": "create_static_tool_filter", - "kind": 2, - "importPath": "src.agents.mcp.util", - "description": "src.agents.mcp.util", - "peekOfCode": "def create_static_tool_filter(\n allowed_tool_names: Optional[list[str]] = None,\n blocked_tool_names: Optional[list[str]] = None,\n) -> Optional[ToolFilterStatic]:\n \"\"\"Create a static tool filter from allowlist and blocklist parameters.\n This is a convenience function for creating a ToolFilterStatic.\n Args:\n allowed_tool_names: Optional list of tool names to allow (whitelist).\n blocked_tool_names: Optional list of tool names to exclude (blacklist).\n Returns:", - "detail": "src.agents.mcp.util", - "documentation": {} - }, - { - "label": "ToolFilterCallable", - "kind": 5, - "importPath": "src.agents.mcp.util", - "description": "src.agents.mcp.util", - "peekOfCode": "ToolFilterCallable = Callable[[\"ToolFilterContext\", \"MCPTool\"], MaybeAwaitable[bool]]\n\"\"\"A function that determines whether a tool should be available.\nArgs:\n context: The context information including run context, agent, and server name.\n tool: The MCP tool to filter.\nReturns:\n Whether the tool should be available (True) or filtered out (False).\n\"\"\"\nclass ToolFilterStatic(TypedDict):\n \"\"\"Static tool filter configuration using allowlists and blocklists.\"\"\"", - "detail": "src.agents.mcp.util", - "documentation": {} - }, - { - "label": "ToolFilter", - "kind": 5, - "importPath": "src.agents.mcp.util", - "description": "src.agents.mcp.util", - "peekOfCode": "ToolFilter = Union[ToolFilterCallable, ToolFilterStatic, None]\n\"\"\"A tool filter that can be either a function, static configuration, or None (no filtering).\"\"\"\ndef create_static_tool_filter(\n allowed_tool_names: Optional[list[str]] = None,\n blocked_tool_names: Optional[list[str]] = None,\n) -> Optional[ToolFilterStatic]:\n \"\"\"Create a static tool filter from allowlist and blocklist parameters.\n This is a convenience function for creating a ToolFilterStatic.\n Args:\n allowed_tool_names: Optional list of tool names to allow (whitelist).", - "detail": "src.agents.mcp.util", - "documentation": {} - }, - { - "label": "Session", - "kind": 6, - "importPath": "src.agents.memory.session", - "description": "src.agents.memory.session", - "peekOfCode": "class Session(Protocol):\n \"\"\"Protocol for session implementations.\n Session stores conversation history for a specific session, allowing\n agents to maintain context without requiring explicit manual memory management.\n \"\"\"\n session_id: str\n async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:\n \"\"\"Retrieve the conversation history for this session.\n Args:\n limit: Maximum number of items to retrieve. 
If None, retrieves all items.", - "detail": "src.agents.memory.session", - "documentation": {} - }, - { - "label": "SessionABC", - "kind": 6, - "importPath": "src.agents.memory.session", - "description": "src.agents.memory.session", - "peekOfCode": "class SessionABC(ABC):\n \"\"\"Abstract base class for session implementations.\n Session stores conversation history for a specific session, allowing\n agents to maintain context without requiring explicit manual memory management.\n This ABC is intended for internal use and as a base class for concrete implementations.\n Third-party libraries should implement the Session protocol instead.\n \"\"\"\n session_id: str\n @abstractmethod\n async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:", - "detail": "src.agents.memory.session", - "documentation": {} - }, - { - "label": "SQLiteSession", - "kind": 6, - "importPath": "src.agents.memory.session", - "description": "src.agents.memory.session", - "peekOfCode": "class SQLiteSession(SessionABC):\n \"\"\"SQLite-based implementation of session storage.\n This implementation stores conversation history in a SQLite database.\n By default, uses an in-memory database that is lost when the process ends.\n For persistent storage, provide a file path.\n \"\"\"\n def __init__(\n self,\n session_id: str,\n db_path: str | Path = \":memory:\",", - "detail": "src.agents.memory.session", - "documentation": {} - }, - { - "label": "set_default_openai_key", - "kind": 2, - "importPath": "src.agents.models._openai_shared", - "description": "src.agents.models._openai_shared", - "peekOfCode": "def set_default_openai_key(key: str) -> None:\n global _default_openai_key\n _default_openai_key = key\ndef get_default_openai_key() -> str | None:\n return _default_openai_key\ndef set_default_openai_client(client: AsyncOpenAI) -> None:\n global _default_openai_client\n _default_openai_client = client\ndef get_default_openai_client() -> AsyncOpenAI | None:\n return _default_openai_client", - "detail": "src.agents.models._openai_shared", - "documentation": {} - }, - { - "label": "get_default_openai_key", - "kind": 2, - "importPath": "src.agents.models._openai_shared", - "description": "src.agents.models._openai_shared", - "peekOfCode": "def get_default_openai_key() -> str | None:\n return _default_openai_key\ndef set_default_openai_client(client: AsyncOpenAI) -> None:\n global _default_openai_client\n _default_openai_client = client\ndef get_default_openai_client() -> AsyncOpenAI | None:\n return _default_openai_client\ndef set_use_responses_by_default(use_responses: bool) -> None:\n global _use_responses_by_default\n _use_responses_by_default = use_responses", - "detail": "src.agents.models._openai_shared", - "documentation": {} - }, - { - "label": "set_default_openai_client", - "kind": 2, - "importPath": "src.agents.models._openai_shared", - "description": "src.agents.models._openai_shared", - "peekOfCode": "def set_default_openai_client(client: AsyncOpenAI) -> None:\n global _default_openai_client\n _default_openai_client = client\ndef get_default_openai_client() -> AsyncOpenAI | None:\n return _default_openai_client\ndef set_use_responses_by_default(use_responses: bool) -> None:\n global _use_responses_by_default\n _use_responses_by_default = use_responses\ndef get_use_responses_by_default() -> bool:\n return _use_responses_by_default", - "detail": "src.agents.models._openai_shared", - "documentation": {} - }, - { - "label": "get_default_openai_client", - "kind": 2, - "importPath": 
"src.agents.models._openai_shared", - "description": "src.agents.models._openai_shared", - "peekOfCode": "def get_default_openai_client() -> AsyncOpenAI | None:\n return _default_openai_client\ndef set_use_responses_by_default(use_responses: bool) -> None:\n global _use_responses_by_default\n _use_responses_by_default = use_responses\ndef get_use_responses_by_default() -> bool:\n return _use_responses_by_default", - "detail": "src.agents.models._openai_shared", - "documentation": {} - }, - { - "label": "set_use_responses_by_default", - "kind": 2, - "importPath": "src.agents.models._openai_shared", - "description": "src.agents.models._openai_shared", - "peekOfCode": "def set_use_responses_by_default(use_responses: bool) -> None:\n global _use_responses_by_default\n _use_responses_by_default = use_responses\ndef get_use_responses_by_default() -> bool:\n return _use_responses_by_default", - "detail": "src.agents.models._openai_shared", - "documentation": {} - }, - { - "label": "get_use_responses_by_default", - "kind": 2, - "importPath": "src.agents.models._openai_shared", - "description": "src.agents.models._openai_shared", - "peekOfCode": "def get_use_responses_by_default() -> bool:\n return _use_responses_by_default", - "detail": "src.agents.models._openai_shared", - "documentation": {} - }, - { - "label": "Converter", - "kind": 6, - "importPath": "src.agents.models.chatcmpl_converter", - "description": "src.agents.models.chatcmpl_converter", - "peekOfCode": "class Converter:\n @classmethod\n def convert_tool_choice(\n cls, tool_choice: Literal[\"auto\", \"required\", \"none\"] | str | MCPToolChoice | None\n ) -> ChatCompletionToolChoiceOptionParam | NotGiven:\n if tool_choice is None:\n return NOT_GIVEN\n elif isinstance(tool_choice, MCPToolChoice):\n raise UserError(\"MCPToolChoice is not supported for Chat Completions models\")\n elif tool_choice == \"auto\":", - "detail": "src.agents.models.chatcmpl_converter", - "documentation": {} - }, - { - "label": "ChatCmplHelpers", - "kind": 6, - "importPath": "src.agents.models.chatcmpl_helpers", - "description": "src.agents.models.chatcmpl_helpers", - "peekOfCode": "class ChatCmplHelpers:\n @classmethod\n def is_openai(cls, client: AsyncOpenAI):\n return str(client.base_url).startswith(\"https://api.openai.com\")\n @classmethod\n def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None:\n # Match the behavior of Responses where store is True when not given\n default_store = True if cls.is_openai(client) else None\n return model_settings.store if model_settings.store is not None else default_store\n @classmethod", - "detail": "src.agents.models.chatcmpl_helpers", - "documentation": {} - }, - { - "label": "_USER_AGENT", - "kind": 5, - "importPath": "src.agents.models.chatcmpl_helpers", - "description": "src.agents.models.chatcmpl_helpers", - "peekOfCode": "_USER_AGENT = f\"Agents/Python {__version__}\"\nHEADERS = {\"User-Agent\": _USER_AGENT}\nclass ChatCmplHelpers:\n @classmethod\n def is_openai(cls, client: AsyncOpenAI):\n return str(client.base_url).startswith(\"https://api.openai.com\")\n @classmethod\n def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None:\n # Match the behavior of Responses where store is True when not given\n default_store = True if cls.is_openai(client) else None", - "detail": "src.agents.models.chatcmpl_helpers", - "documentation": {} - }, - { - "label": "HEADERS", - "kind": 5, - "importPath": "src.agents.models.chatcmpl_helpers", - "description": 
"src.agents.models.chatcmpl_helpers", - "peekOfCode": "HEADERS = {\"User-Agent\": _USER_AGENT}\nclass ChatCmplHelpers:\n @classmethod\n def is_openai(cls, client: AsyncOpenAI):\n return str(client.base_url).startswith(\"https://api.openai.com\")\n @classmethod\n def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None:\n # Match the behavior of Responses where store is True when not given\n default_store = True if cls.is_openai(client) else None\n return model_settings.store if model_settings.store is not None else default_store", - "detail": "src.agents.models.chatcmpl_helpers", - "documentation": {} - }, - { - "label": "Part", - "kind": 6, - "importPath": "src.agents.models.chatcmpl_stream_handler", - "description": "src.agents.models.chatcmpl_stream_handler", - "peekOfCode": "class Part:\n def __init__(self, text: str, type: str):\n self.text = text\n self.type = type\n@dataclass\nclass StreamingState:\n started: bool = False\n text_content_index_and_output: tuple[int, ResponseOutputText] | None = None\n refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None\n reasoning_content_index_and_output: tuple[int, ResponseReasoningItem] | None = None", - "detail": "src.agents.models.chatcmpl_stream_handler", - "documentation": {} - }, - { - "label": "StreamingState", - "kind": 6, - "importPath": "src.agents.models.chatcmpl_stream_handler", - "description": "src.agents.models.chatcmpl_stream_handler", - "peekOfCode": "class StreamingState:\n started: bool = False\n text_content_index_and_output: tuple[int, ResponseOutputText] | None = None\n refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None\n reasoning_content_index_and_output: tuple[int, ResponseReasoningItem] | None = None\n function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)\nclass SequenceNumber:\n def __init__(self):\n self._sequence_number = 0\n def get_and_increment(self) -> int:", - "detail": "src.agents.models.chatcmpl_stream_handler", - "documentation": {} - }, - { - "label": "SequenceNumber", - "kind": 6, - "importPath": "src.agents.models.chatcmpl_stream_handler", - "description": "src.agents.models.chatcmpl_stream_handler", - "peekOfCode": "class SequenceNumber:\n def __init__(self):\n self._sequence_number = 0\n def get_and_increment(self) -> int:\n num = self._sequence_number\n self._sequence_number += 1\n return num\nclass ChatCmplStreamHandler:\n @classmethod\n async def handle_stream(", - "detail": "src.agents.models.chatcmpl_stream_handler", - "documentation": {} - }, - { - "label": "ChatCmplStreamHandler", - "kind": 6, - "importPath": "src.agents.models.chatcmpl_stream_handler", - "description": "src.agents.models.chatcmpl_stream_handler", - "peekOfCode": "class ChatCmplStreamHandler:\n @classmethod\n async def handle_stream(\n cls,\n response: Response,\n stream: AsyncStream[ChatCompletionChunk],\n ) -> AsyncIterator[TResponseStreamEvent]:\n usage: CompletionUsage | None = None\n state = StreamingState()\n sequence_number = SequenceNumber()", - "detail": "src.agents.models.chatcmpl_stream_handler", - "documentation": {} - }, - { - "label": "FAKE_RESPONSES_ID", - "kind": 5, - "importPath": "src.agents.models.fake_id", - "description": "src.agents.models.fake_id", - "peekOfCode": "FAKE_RESPONSES_ID = \"__fake_id__\"\n\"\"\"This is a placeholder ID used to fill in the `id` field in Responses API related objects. It's\nuseful when you're creating Responses objects from non-Responses APIs, e.g. 
the OpenAI Chat\nCompletions API or other LLM providers.\n\"\"\"", - "detail": "src.agents.models.fake_id", - "documentation": {} - }, - { - "label": "ModelTracing", - "kind": 6, - "importPath": "src.agents.models.interface", - "description": "src.agents.models.interface", - "peekOfCode": "class ModelTracing(enum.Enum):\n DISABLED = 0\n \"\"\"Tracing is disabled entirely.\"\"\"\n ENABLED = 1\n \"\"\"Tracing is enabled, and all data is included.\"\"\"\n ENABLED_WITHOUT_DATA = 2\n \"\"\"Tracing is enabled, but inputs/outputs are not included.\"\"\"\n def is_disabled(self) -> bool:\n return self == ModelTracing.DISABLED\n def include_data(self) -> bool:", - "detail": "src.agents.models.interface", - "documentation": {} - }, - { - "label": "Model", - "kind": 6, - "importPath": "src.agents.models.interface", - "description": "src.agents.models.interface", - "peekOfCode": "class Model(abc.ABC):\n \"\"\"The base interface for calling an LLM.\"\"\"\n @abc.abstractmethod\n async def get_response(\n self,\n system_instructions: str | None,\n input: str | list[TResponseInputItem],\n model_settings: ModelSettings,\n tools: list[Tool],\n output_schema: AgentOutputSchemaBase | None,", - "detail": "src.agents.models.interface", - "documentation": {} - }, - { - "label": "ModelProvider", - "kind": 6, - "importPath": "src.agents.models.interface", - "description": "src.agents.models.interface", - "peekOfCode": "class ModelProvider(abc.ABC):\n \"\"\"The base interface for a model provider.\n Model provider is responsible for looking up Models by name.\n \"\"\"\n @abc.abstractmethod\n def get_model(self, model_name: str | None) -> Model:\n \"\"\"Get a model by name.\n Args:\n model_name: The name of the model to get.\n Returns:", - "detail": "src.agents.models.interface", - "documentation": {} - }, - { - "label": "MultiProviderMap", - "kind": 6, - "importPath": "src.agents.models.multi_provider", - "description": "src.agents.models.multi_provider", - "peekOfCode": "class MultiProviderMap:\n \"\"\"A map of model name prefixes to ModelProviders.\"\"\"\n def __init__(self):\n self._mapping: dict[str, ModelProvider] = {}\n def has_prefix(self, prefix: str) -> bool:\n \"\"\"Returns True if the given prefix is in the mapping.\"\"\"\n return prefix in self._mapping\n def get_mapping(self) -> dict[str, ModelProvider]:\n \"\"\"Returns a copy of the current prefix -> ModelProvider mapping.\"\"\"\n return self._mapping.copy()", - "detail": "src.agents.models.multi_provider", - "documentation": {} - }, - { - "label": "MultiProvider", - "kind": 6, - "importPath": "src.agents.models.multi_provider", - "description": "src.agents.models.multi_provider", - "peekOfCode": "class MultiProvider(ModelProvider):\n \"\"\"This ModelProvider maps to a Model based on the prefix of the model name. By default, the\n mapping is:\n - \"openai/\" prefix or no prefix -> OpenAIProvider. e.g. \"openai/gpt-4.1\", \"gpt-4.1\"\n - \"litellm/\" prefix -> LitellmProvider. e.g. 
\"litellm/openai/gpt-4.1\"\n You can override or customize this mapping.\n \"\"\"\n def __init__(\n self,\n *,", - "detail": "src.agents.models.multi_provider", - "documentation": {} - }, - { - "label": "OpenAIChatCompletionsModel", - "kind": 6, - "importPath": "src.agents.models.openai_chatcompletions", - "description": "src.agents.models.openai_chatcompletions", - "peekOfCode": "class OpenAIChatCompletionsModel(Model):\n def __init__(\n self,\n model: str | ChatModel,\n openai_client: AsyncOpenAI,\n ) -> None:\n self.model = model\n self._client = openai_client\n def _non_null_or_not_given(self, value: Any) -> Any:\n return value if value is not None else NOT_GIVEN", - "detail": "src.agents.models.openai_chatcompletions", - "documentation": {} - }, - { - "label": "OpenAIProvider", - "kind": 6, - "importPath": "src.agents.models.openai_provider", - "description": "src.agents.models.openai_provider", - "peekOfCode": "class OpenAIProvider(ModelProvider):\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,\n organization: str | None = None,\n project: str | None = None,\n use_responses: bool | None = None,", - "detail": "src.agents.models.openai_provider", - "documentation": {} - }, - { - "label": "shared_http_client", - "kind": 2, - "importPath": "src.agents.models.openai_provider", - "description": "src.agents.models.openai_provider", - "peekOfCode": "def shared_http_client() -> httpx.AsyncClient:\n global _http_client\n if _http_client is None:\n _http_client = DefaultAsyncHttpxClient()\n return _http_client\nclass OpenAIProvider(ModelProvider):\n def __init__(\n self,\n *,\n api_key: str | None = None,", - "detail": "src.agents.models.openai_provider", - "documentation": {} - }, - { - "label": "OpenAIResponsesModel", - "kind": 6, - "importPath": "src.agents.models.openai_responses", - "description": "src.agents.models.openai_responses", - "peekOfCode": "class OpenAIResponsesModel(Model):\n \"\"\"\n Implementation of `Model` that uses the OpenAI Responses API.\n \"\"\"\n def __init__(\n self,\n model: str | ChatModel,\n openai_client: AsyncOpenAI,\n ) -> None:\n self.model = model", - "detail": "src.agents.models.openai_responses", - "documentation": {} - }, - { - "label": "ConvertedTools", - "kind": 6, - "importPath": "src.agents.models.openai_responses", - "description": "src.agents.models.openai_responses", - "peekOfCode": "class ConvertedTools:\n tools: list[ToolParam]\n includes: list[ResponseIncludable]\nclass Converter:\n @classmethod\n def convert_tool_choice(\n cls, tool_choice: Literal[\"auto\", \"required\", \"none\"] | str | MCPToolChoice | None\n ) -> response_create_params.ToolChoice | NotGiven:\n if tool_choice is None:\n return NOT_GIVEN", - "detail": "src.agents.models.openai_responses", - "documentation": {} - }, - { - "label": "Converter", - "kind": 6, - "importPath": "src.agents.models.openai_responses", - "description": "src.agents.models.openai_responses", - "peekOfCode": "class Converter:\n @classmethod\n def convert_tool_choice(\n cls, tool_choice: Literal[\"auto\", \"required\", \"none\"] | str | MCPToolChoice | None\n ) -> response_create_params.ToolChoice | NotGiven:\n if tool_choice is None:\n return NOT_GIVEN\n elif isinstance(tool_choice, MCPToolChoice):\n return {\n \"server_label\": tool_choice.server_label,", - "detail": "src.agents.models.openai_responses", - "documentation": {} - }, - { - "label": "_USER_AGENT", - "kind": 5, - "importPath": 
"src.agents.models.openai_responses", - "description": "src.agents.models.openai_responses", - "peekOfCode": "_USER_AGENT = f\"Agents/Python {__version__}\"\n_HEADERS = {\"User-Agent\": _USER_AGENT}\nclass OpenAIResponsesModel(Model):\n \"\"\"\n Implementation of `Model` that uses the OpenAI Responses API.\n \"\"\"\n def __init__(\n self,\n model: str | ChatModel,\n openai_client: AsyncOpenAI,", - "detail": "src.agents.models.openai_responses", - "documentation": {} - }, - { - "label": "_HEADERS", - "kind": 5, - "importPath": "src.agents.models.openai_responses", - "description": "src.agents.models.openai_responses", - "peekOfCode": "_HEADERS = {\"User-Agent\": _USER_AGENT}\nclass OpenAIResponsesModel(Model):\n \"\"\"\n Implementation of `Model` that uses the OpenAI Responses API.\n \"\"\"\n def __init__(\n self,\n model: str | ChatModel,\n openai_client: AsyncOpenAI,\n ) -> None:", - "detail": "src.agents.models.openai_responses", - "documentation": {} - }, - { - "label": "RealtimeAgent", - "kind": 6, - "importPath": "src.agents.realtime.agent", - "description": "src.agents.realtime.agent", - "peekOfCode": "class RealtimeAgent(AgentBase, Generic[TContext]):\n \"\"\"A specialized agent instance that is meant to be used within a `RealtimeSession` to build\n voice agents. Due to the nature of this agent, some configuration options are not supported\n that are supported by regular `Agent` instances. For example:\n - `model` choice is not supported, as all RealtimeAgents will be handled by the same model\n within a `RealtimeSession`.\n - `modelSettings` is not supported, as all RealtimeAgents will be handled by the same model\n within a `RealtimeSession`.\n - `outputType` is not supported, as RealtimeAgents do not support structured outputs.\n - `toolUseBehavior` is not supported, as all RealtimeAgents will be handled by the same model", - "detail": "src.agents.realtime.agent", - "documentation": {} - }, - { - "label": "RealtimeAgentHooks", - "kind": 5, - "importPath": "src.agents.realtime.agent", - "description": "src.agents.realtime.agent", - "peekOfCode": "RealtimeAgentHooks = AgentHooksBase[TContext, \"RealtimeAgent[TContext]\"]\n\"\"\"Agent hooks for `RealtimeAgent`s.\"\"\"\nRealtimeRunHooks = RunHooksBase[TContext, \"RealtimeAgent[TContext]\"]\n\"\"\"Run hooks for `RealtimeAgent`s.\"\"\"\n@dataclass\nclass RealtimeAgent(AgentBase, Generic[TContext]):\n \"\"\"A specialized agent instance that is meant to be used within a `RealtimeSession` to build\n voice agents. Due to the nature of this agent, some configuration options are not supported\n that are supported by regular `Agent` instances. For example:\n - `model` choice is not supported, as all RealtimeAgents will be handled by the same model", - "detail": "src.agents.realtime.agent", - "documentation": {} - }, - { - "label": "RealtimeRunHooks", - "kind": 5, - "importPath": "src.agents.realtime.agent", - "description": "src.agents.realtime.agent", - "peekOfCode": "RealtimeRunHooks = RunHooksBase[TContext, \"RealtimeAgent[TContext]\"]\n\"\"\"Run hooks for `RealtimeAgent`s.\"\"\"\n@dataclass\nclass RealtimeAgent(AgentBase, Generic[TContext]):\n \"\"\"A specialized agent instance that is meant to be used within a `RealtimeSession` to build\n voice agents. Due to the nature of this agent, some configuration options are not supported\n that are supported by regular `Agent` instances. 
For example:\n - `model` choice is not supported, as all RealtimeAgents will be handled by the same model\n within a `RealtimeSession`.\n - `modelSettings` is not supported, as all RealtimeAgents will be handled by the same model", - "detail": "src.agents.realtime.agent", - "documentation": {} - }, - { - "label": "RealtimeClientMessage", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeClientMessage(TypedDict):\n \"\"\"A raw message to be sent to the model.\"\"\"\n type: str # explicitly required\n other_data: NotRequired[dict[str, Any]]\n \"\"\"Merged into the message body.\"\"\"\nclass RealtimeInputAudioTranscriptionConfig(TypedDict):\n language: NotRequired[str]\n model: NotRequired[Literal[\"gpt-4o-transcribe\", \"gpt-4o-mini-transcribe\", \"whisper-1\"] | str]\n prompt: NotRequired[str]\nclass RealtimeTurnDetectionConfig(TypedDict):", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeInputAudioTranscriptionConfig", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeInputAudioTranscriptionConfig(TypedDict):\n language: NotRequired[str]\n model: NotRequired[Literal[\"gpt-4o-transcribe\", \"gpt-4o-mini-transcribe\", \"whisper-1\"] | str]\n prompt: NotRequired[str]\nclass RealtimeTurnDetectionConfig(TypedDict):\n \"\"\"Turn detection config. Allows extra vendor keys if needed.\"\"\"\n type: NotRequired[Literal[\"semantic_vad\", \"server_vad\"]]\n create_response: NotRequired[bool]\n eagerness: NotRequired[Literal[\"auto\", \"low\", \"medium\", \"high\"]]\n interrupt_response: NotRequired[bool]", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeTurnDetectionConfig", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeTurnDetectionConfig(TypedDict):\n \"\"\"Turn detection config. 
Allows extra vendor keys if needed.\"\"\"\n type: NotRequired[Literal[\"semantic_vad\", \"server_vad\"]]\n create_response: NotRequired[bool]\n eagerness: NotRequired[Literal[\"auto\", \"low\", \"medium\", \"high\"]]\n interrupt_response: NotRequired[bool]\n prefix_padding_ms: NotRequired[int]\n silence_duration_ms: NotRequired[int]\n threshold: NotRequired[float]\nclass RealtimeSessionModelSettings(TypedDict):", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeSessionModelSettings", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeSessionModelSettings(TypedDict):\n \"\"\"Model settings for a realtime model session.\"\"\"\n model_name: NotRequired[RealtimeModelName]\n instructions: NotRequired[str]\n modalities: NotRequired[list[Literal[\"text\", \"audio\"]]]\n voice: NotRequired[str]\n input_audio_format: NotRequired[RealtimeAudioFormat]\n output_audio_format: NotRequired[RealtimeAudioFormat]\n input_audio_transcription: NotRequired[RealtimeInputAudioTranscriptionConfig]\n turn_detection: NotRequired[RealtimeTurnDetectionConfig]", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeGuardrailsSettings", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeGuardrailsSettings(TypedDict):\n \"\"\"Settings for output guardrails in realtime sessions.\"\"\"\n debounce_text_length: NotRequired[int]\n \"\"\"\n The minimum number of characters to accumulate before running guardrails on transcript\n deltas. Defaults to 100. Guardrails run every time the accumulated text reaches\n 1x, 2x, 3x, etc. times this threshold.\n \"\"\"\nclass RealtimeModelTracingConfig(TypedDict):\n \"\"\"Configuration for tracing in realtime model sessions.\"\"\"", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeModelTracingConfig", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeModelTracingConfig(TypedDict):\n \"\"\"Configuration for tracing in realtime model sessions.\"\"\"\n workflow_name: NotRequired[str]\n \"\"\"The workflow name to use for tracing.\"\"\"\n group_id: NotRequired[str]\n \"\"\"A group identifier to use for tracing, to link multiple traces together.\"\"\"\n metadata: NotRequired[dict[str, Any]]\n \"\"\"Additional metadata to include with the trace.\"\"\"\nclass RealtimeRunConfig(TypedDict):\n model_settings: NotRequired[RealtimeSessionModelSettings]", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeRunConfig", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeRunConfig(TypedDict):\n model_settings: NotRequired[RealtimeSessionModelSettings]\n output_guardrails: NotRequired[list[OutputGuardrail[Any]]]\n \"\"\"List of output guardrails to run on the agent's responses.\"\"\"\n guardrails_settings: NotRequired[RealtimeGuardrailsSettings]\n \"\"\"Settings for guardrail execution.\"\"\"\n tracing_disabled: NotRequired[bool]\n \"\"\"Whether tracing is disabled for this run.\"\"\"\n # TODO (rm) Add history audio storage config\nclass RealtimeUserInputText(TypedDict):", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeUserInputText", - "kind": 6, - 
"importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeUserInputText(TypedDict):\n type: Literal[\"input_text\"]\n text: str\nclass RealtimeUserInputMessage(TypedDict):\n type: Literal[\"message\"]\n role: Literal[\"user\"]\n content: list[RealtimeUserInputText]\nRealtimeUserInput: TypeAlias = Union[str, RealtimeUserInputMessage]", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeUserInputMessage", - "kind": 6, - "importPath": "src.agents.realtime.config", - "description": "src.agents.realtime.config", - "peekOfCode": "class RealtimeUserInputMessage(TypedDict):\n type: Literal[\"message\"]\n role: Literal[\"user\"]\n content: list[RealtimeUserInputText]\nRealtimeUserInput: TypeAlias = Union[str, RealtimeUserInputMessage]", - "detail": "src.agents.realtime.config", - "documentation": {} - }, - { - "label": "RealtimeEventInfo", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeEventInfo:\n context: RunContextWrapper\n \"\"\"The context for the event.\"\"\"\n@dataclass\nclass RealtimeAgentStartEvent:\n \"\"\"A new agent has started.\"\"\"\n agent: RealtimeAgent\n \"\"\"The new agent.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAgentStartEvent", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeAgentStartEvent:\n \"\"\"A new agent has started.\"\"\"\n agent: RealtimeAgent\n \"\"\"The new agent.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"agent_start\"] = \"agent_start\"\n@dataclass\nclass RealtimeAgentEndEvent:\n \"\"\"An agent has ended.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAgentEndEvent", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeAgentEndEvent:\n \"\"\"An agent has ended.\"\"\"\n agent: RealtimeAgent\n \"\"\"The agent that ended.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"agent_end\"] = \"agent_end\"\n@dataclass\nclass RealtimeHandoffEvent:\n \"\"\"An agent has handed off to another agent.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeHandoffEvent", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeHandoffEvent:\n \"\"\"An agent has handed off to another agent.\"\"\"\n from_agent: RealtimeAgent\n \"\"\"The agent that handed off.\"\"\"\n to_agent: RealtimeAgent\n \"\"\"The agent that was handed off to.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"handoff\"] = \"handoff\"\n@dataclass", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeToolStart", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeToolStart:\n \"\"\"An agent is starting a tool call.\"\"\"\n agent: RealtimeAgent\n \"\"\"The agent that updated.\"\"\"\n tool: Tool\n info: 
RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"tool_start\"] = \"tool_start\"\n@dataclass\nclass RealtimeToolEnd:", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeToolEnd", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeToolEnd:\n \"\"\"An agent has ended a tool call.\"\"\"\n agent: RealtimeAgent\n \"\"\"The agent that ended the tool call.\"\"\"\n tool: Tool\n \"\"\"The tool that was called.\"\"\"\n output: Any\n \"\"\"The output of the tool call.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeRawModelEvent", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeRawModelEvent:\n \"\"\"Forwards raw events from the model layer.\"\"\"\n data: RealtimeModelEvent\n \"\"\"The raw data from the model layer.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"raw_model_event\"] = \"raw_model_event\"\n@dataclass\nclass RealtimeAudioEnd:\n \"\"\"Triggered when the agent stops generating audio.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAudioEnd", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeAudioEnd:\n \"\"\"Triggered when the agent stops generating audio.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"audio_end\"] = \"audio_end\"\n@dataclass\nclass RealtimeAudio:\n \"\"\"Triggered when the agent generates new audio to be played.\"\"\"\n audio: RealtimeModelAudioEvent\n \"\"\"The audio event from the model layer.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAudio", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeAudio:\n \"\"\"Triggered when the agent generates new audio to be played.\"\"\"\n audio: RealtimeModelAudioEvent\n \"\"\"The audio event from the model layer.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"audio\"] = \"audio\"\n@dataclass\nclass RealtimeAudioInterrupted:\n \"\"\"Triggered when the agent is interrupted. Can be listened to by the user to stop audio", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeAudioInterrupted", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeAudioInterrupted:\n \"\"\"Triggered when the agent is interrupted. 
Can be listened to by the user to stop audio\n playback or give visual indicators to the user.\n \"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"audio_interrupted\"] = \"audio_interrupted\"\n@dataclass\nclass RealtimeError:\n \"\"\"An error has occurred.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeError", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeError:\n \"\"\"An error has occurred.\"\"\"\n error: Any\n \"\"\"The error that occurred.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"error\"] = \"error\"\n@dataclass\nclass RealtimeHistoryUpdated:\n \"\"\"The history has been updated. Contains the full history of the session.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeHistoryUpdated", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeHistoryUpdated:\n \"\"\"The history has been updated. Contains the full history of the session.\"\"\"\n history: list[RealtimeItem]\n \"\"\"The full history of the session.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"history_updated\"] = \"history_updated\"\n@dataclass\nclass RealtimeHistoryAdded:\n \"\"\"A new item has been added to the history.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeHistoryAdded", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeHistoryAdded:\n \"\"\"A new item has been added to the history.\"\"\"\n item: RealtimeItem\n \"\"\"The new item that was added to the history.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"history_added\"] = \"history_added\"\n@dataclass\nclass RealtimeGuardrailTripped:\n \"\"\"A guardrail has been tripped and the agent has been interrupted.\"\"\"", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "RealtimeGuardrailTripped", - "kind": 6, - "importPath": "src.agents.realtime.events", - "description": "src.agents.realtime.events", - "peekOfCode": "class RealtimeGuardrailTripped:\n \"\"\"A guardrail has been tripped and the agent has been interrupted.\"\"\"\n guardrail_results: list[OutputGuardrailResult]\n \"\"\"The results from all triggered guardrails.\"\"\"\n message: str\n \"\"\"The message that was being generated when the guardrail was triggered.\"\"\"\n info: RealtimeEventInfo\n \"\"\"Common info for all events, such as the context.\"\"\"\n type: Literal[\"guardrail_tripped\"] = \"guardrail_tripped\"\nRealtimeSessionEvent: TypeAlias = Union[", - "detail": "src.agents.realtime.events", - "documentation": {} - }, - { - "label": "InputText", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class InputText(BaseModel):\n type: Literal[\"input_text\"] = \"input_text\"\n text: str | None = None\n # Allow extra data\n model_config = ConfigDict(extra=\"allow\")\nclass InputAudio(BaseModel):\n type: Literal[\"input_audio\"] = \"input_audio\"\n audio: str | None = None\n transcript: str | None = None\n # Allow extra 
data", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "InputAudio", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class InputAudio(BaseModel):\n type: Literal[\"input_audio\"] = \"input_audio\"\n audio: str | None = None\n transcript: str | None = None\n # Allow extra data\n model_config = ConfigDict(extra=\"allow\")\nclass AssistantText(BaseModel):\n type: Literal[\"text\"] = \"text\"\n text: str | None = None\n # Allow extra data", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "AssistantText", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class AssistantText(BaseModel):\n type: Literal[\"text\"] = \"text\"\n text: str | None = None\n # Allow extra data\n model_config = ConfigDict(extra=\"allow\")\nclass SystemMessageItem(BaseModel):\n item_id: str\n previous_item_id: str | None = None\n type: Literal[\"message\"] = \"message\"\n role: Literal[\"system\"] = \"system\"", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "SystemMessageItem", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class SystemMessageItem(BaseModel):\n item_id: str\n previous_item_id: str | None = None\n type: Literal[\"message\"] = \"message\"\n role: Literal[\"system\"] = \"system\"\n content: list[InputText]\n # Allow extra data\n model_config = ConfigDict(extra=\"allow\")\nclass UserMessageItem(BaseModel):\n item_id: str", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "UserMessageItem", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class UserMessageItem(BaseModel):\n item_id: str\n previous_item_id: str | None = None\n type: Literal[\"message\"] = \"message\"\n role: Literal[\"user\"] = \"user\"\n content: list[Annotated[InputText | InputAudio, Field(discriminator=\"type\")]]\n # Allow extra data\n model_config = ConfigDict(extra=\"allow\")\nclass AssistantMessageItem(BaseModel):\n item_id: str", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "AssistantMessageItem", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class AssistantMessageItem(BaseModel):\n item_id: str\n previous_item_id: str | None = None\n type: Literal[\"message\"] = \"message\"\n role: Literal[\"assistant\"] = \"assistant\"\n status: Literal[\"in_progress\", \"completed\", \"incomplete\"] | None = None\n content: list[AssistantText]\n # Allow extra data\n model_config = ConfigDict(extra=\"allow\")\nRealtimeMessageItem = Annotated[", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeToolCallItem", - "kind": 6, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class RealtimeToolCallItem(BaseModel):\n item_id: str\n previous_item_id: str | None = None\n call_id: str | None\n type: Literal[\"function_call\"] = \"function_call\"\n status: Literal[\"in_progress\", \"completed\"]\n arguments: str\n name: str\n output: str | None = None\n # Allow extra data", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeResponse", - "kind": 6, - "importPath": 
"src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "class RealtimeResponse(BaseModel):\n id: str\n output: list[RealtimeMessageItem]", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeMessageItem", - "kind": 5, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "RealtimeMessageItem = Annotated[\n Union[SystemMessageItem, UserMessageItem, AssistantMessageItem],\n Field(discriminator=\"role\"),\n]\nclass RealtimeToolCallItem(BaseModel):\n item_id: str\n previous_item_id: str | None = None\n call_id: str | None\n type: Literal[\"function_call\"] = \"function_call\"\n status: Literal[\"in_progress\", \"completed\"]", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeItem", - "kind": 5, - "importPath": "src.agents.realtime.items", - "description": "src.agents.realtime.items", - "peekOfCode": "RealtimeItem = Union[RealtimeMessageItem, RealtimeToolCallItem]\nclass RealtimeResponse(BaseModel):\n id: str\n output: list[RealtimeMessageItem]", - "detail": "src.agents.realtime.items", - "documentation": {} - }, - { - "label": "RealtimeModelListener", - "kind": 6, - "importPath": "src.agents.realtime.model", - "description": "src.agents.realtime.model", - "peekOfCode": "class RealtimeModelListener(abc.ABC):\n \"\"\"A listener for realtime transport events.\"\"\"\n @abc.abstractmethod\n async def on_event(self, event: RealtimeModelEvent) -> None:\n \"\"\"Called when an event is emitted by the realtime transport.\"\"\"\n pass\nclass RealtimeModelConfig(TypedDict):\n \"\"\"Options for connecting to a realtime model.\"\"\"\n api_key: NotRequired[str | Callable[[], MaybeAwaitable[str]]]\n \"\"\"The API key (or function that returns a key) to use when connecting. If unset, the model will", - "detail": "src.agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModelConfig", - "kind": 6, - "importPath": "src.agents.realtime.model", - "description": "src.agents.realtime.model", - "peekOfCode": "class RealtimeModelConfig(TypedDict):\n \"\"\"Options for connecting to a realtime model.\"\"\"\n api_key: NotRequired[str | Callable[[], MaybeAwaitable[str]]]\n \"\"\"The API key (or function that returns a key) to use when connecting. If unset, the model will\n try to use a sane default. For example, the OpenAI Realtime model will try to use the\n `OPENAI_API_KEY` environment variable.\n \"\"\"\n url: NotRequired[str]\n \"\"\"The URL to use when connecting. If unset, the model will use a sane default. 
For example,\n the OpenAI Realtime model will use the default OpenAI WebSocket URL.", - "detail": "src.agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModel", - "kind": 6, - "importPath": "src.agents.realtime.model", - "description": "src.agents.realtime.model", - "peekOfCode": "class RealtimeModel(abc.ABC):\n \"\"\"Interface for connecting to a realtime model and sending/receiving events.\"\"\"\n @abc.abstractmethod\n async def connect(self, options: RealtimeModelConfig) -> None:\n \"\"\"Establish a connection to the model and keep it alive.\"\"\"\n pass\n @abc.abstractmethod\n def add_listener(self, listener: RealtimeModelListener) -> None:\n \"\"\"Add a listener to the model.\"\"\"\n pass", - "detail": "src.agents.realtime.model", - "documentation": {} - }, - { - "label": "RealtimeModelErrorEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelErrorEvent:\n \"\"\"Represents a transport‑layer error.\"\"\"\n error: Any\n type: Literal[\"error\"] = \"error\"\n@dataclass\nclass RealtimeModelToolCallEvent:\n \"\"\"Model attempted a tool/function call.\"\"\"\n name: str\n call_id: str\n arguments: str", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelToolCallEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelToolCallEvent:\n \"\"\"Model attempted a tool/function call.\"\"\"\n name: str\n call_id: str\n arguments: str\n id: str | None = None\n previous_item_id: str | None = None\n type: Literal[\"function_call\"] = \"function_call\"\n@dataclass\nclass RealtimeModelAudioEvent:", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelAudioEvent:\n \"\"\"Raw audio bytes emitted by the model.\"\"\"\n data: bytes\n response_id: str\n type: Literal[\"audio\"] = \"audio\"\n@dataclass\nclass RealtimeModelAudioInterruptedEvent:\n \"\"\"Audio interrupted.\"\"\"\n type: Literal[\"audio_interrupted\"] = \"audio_interrupted\"\n@dataclass", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioInterruptedEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelAudioInterruptedEvent:\n \"\"\"Audio interrupted.\"\"\"\n type: Literal[\"audio_interrupted\"] = \"audio_interrupted\"\n@dataclass\nclass RealtimeModelAudioDoneEvent:\n \"\"\"Audio done.\"\"\"\n type: Literal[\"audio_done\"] = \"audio_done\"\n@dataclass\nclass RealtimeModelInputAudioTranscriptionCompletedEvent:\n \"\"\"Input audio transcription completed.\"\"\"", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelAudioDoneEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelAudioDoneEvent:\n \"\"\"Audio done.\"\"\"\n type: Literal[\"audio_done\"] = \"audio_done\"\n@dataclass\nclass RealtimeModelInputAudioTranscriptionCompletedEvent:\n \"\"\"Input audio transcription completed.\"\"\"\n item_id: str\n transcript: str\n type: 
Literal[\"input_audio_transcription_completed\"] = \"input_audio_transcription_completed\"\n@dataclass", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelInputAudioTranscriptionCompletedEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelInputAudioTranscriptionCompletedEvent:\n \"\"\"Input audio transcription completed.\"\"\"\n item_id: str\n transcript: str\n type: Literal[\"input_audio_transcription_completed\"] = \"input_audio_transcription_completed\"\n@dataclass\nclass RealtimeModelTranscriptDeltaEvent:\n \"\"\"Partial transcript update.\"\"\"\n item_id: str\n delta: str", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelTranscriptDeltaEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelTranscriptDeltaEvent:\n \"\"\"Partial transcript update.\"\"\"\n item_id: str\n delta: str\n response_id: str\n type: Literal[\"transcript_delta\"] = \"transcript_delta\"\n@dataclass\nclass RealtimeModelItemUpdatedEvent:\n \"\"\"Item added to the history or updated.\"\"\"\n item: RealtimeItem", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelItemUpdatedEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelItemUpdatedEvent:\n \"\"\"Item added to the history or updated.\"\"\"\n item: RealtimeItem\n type: Literal[\"item_updated\"] = \"item_updated\"\n@dataclass\nclass RealtimeModelItemDeletedEvent:\n \"\"\"Item deleted from the history.\"\"\"\n item_id: str\n type: Literal[\"item_deleted\"] = \"item_deleted\"\n@dataclass", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelItemDeletedEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelItemDeletedEvent:\n \"\"\"Item deleted from the history.\"\"\"\n item_id: str\n type: Literal[\"item_deleted\"] = \"item_deleted\"\n@dataclass\nclass RealtimeModelConnectionStatusEvent:\n \"\"\"Connection status changed.\"\"\"\n status: RealtimeConnectionStatus\n type: Literal[\"connection_status\"] = \"connection_status\"\n@dataclass", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelConnectionStatusEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelConnectionStatusEvent:\n \"\"\"Connection status changed.\"\"\"\n status: RealtimeConnectionStatus\n type: Literal[\"connection_status\"] = \"connection_status\"\n@dataclass\nclass RealtimeModelTurnStartedEvent:\n \"\"\"Triggered when the model starts generating a response for a turn.\"\"\"\n type: Literal[\"turn_started\"] = \"turn_started\"\n@dataclass\nclass RealtimeModelTurnEndedEvent:", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelTurnStartedEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelTurnStartedEvent:\n \"\"\"Triggered when the model starts 
generating a response for a turn.\"\"\"\n type: Literal[\"turn_started\"] = \"turn_started\"\n@dataclass\nclass RealtimeModelTurnEndedEvent:\n \"\"\"Triggered when the model finishes generating a response for a turn.\"\"\"\n type: Literal[\"turn_ended\"] = \"turn_ended\"\n@dataclass\nclass RealtimeModelOtherEvent:\n \"\"\"Used as a catchall for vendor-specific events.\"\"\"", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelTurnEndedEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelTurnEndedEvent:\n \"\"\"Triggered when the model finishes generating a response for a turn.\"\"\"\n type: Literal[\"turn_ended\"] = \"turn_ended\"\n@dataclass\nclass RealtimeModelOtherEvent:\n \"\"\"Used as a catchall for vendor-specific events.\"\"\"\n data: Any\n type: Literal[\"other\"] = \"other\"\n@dataclass\nclass RealtimeModelExceptionEvent:", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelOtherEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelOtherEvent:\n \"\"\"Used as a catchall for vendor-specific events.\"\"\"\n data: Any\n type: Literal[\"other\"] = \"other\"\n@dataclass\nclass RealtimeModelExceptionEvent:\n \"\"\"Exception occurred during model operation.\"\"\"\n exception: Exception\n context: str | None = None\n type: Literal[\"exception\"] = \"exception\"", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelExceptionEvent", - "kind": 6, - "importPath": "src.agents.realtime.model_events", - "description": "src.agents.realtime.model_events", - "peekOfCode": "class RealtimeModelExceptionEvent:\n \"\"\"Exception occurred during model operation.\"\"\"\n exception: Exception\n context: str | None = None\n type: Literal[\"exception\"] = \"exception\"\n# TODO (rm) Add usage events\nRealtimeModelEvent: TypeAlias = Union[\n RealtimeModelErrorEvent,\n RealtimeModelToolCallEvent,\n RealtimeModelAudioEvent,", - "detail": "src.agents.realtime.model_events", - "documentation": {} - }, - { - "label": "RealtimeModelRawClientMessage", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelRawClientMessage(TypedDict):\n \"\"\"A raw message to be sent to the model.\"\"\"\n type: str # explicitly required\n other_data: NotRequired[dict[str, Any]]\n \"\"\"Merged into the message body.\"\"\"\nclass RealtimeModelInputTextContent(TypedDict):\n \"\"\"A piece of text to be sent to the model.\"\"\"\n type: Literal[\"input_text\"]\n text: str\nclass RealtimeModelUserInputMessage(TypedDict):", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelInputTextContent", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelInputTextContent(TypedDict):\n \"\"\"A piece of text to be sent to the model.\"\"\"\n type: Literal[\"input_text\"]\n text: str\nclass RealtimeModelUserInputMessage(TypedDict):\n \"\"\"A message to be sent to the model.\"\"\"\n type: Literal[\"message\"]\n role: Literal[\"user\"]\n content: list[RealtimeModelInputTextContent]\nRealtimeModelUserInput: TypeAlias = Union[str, 
RealtimeModelUserInputMessage]", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelUserInputMessage", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelUserInputMessage(TypedDict):\n \"\"\"A message to be sent to the model.\"\"\"\n type: Literal[\"message\"]\n role: Literal[\"user\"]\n content: list[RealtimeModelInputTextContent]\nRealtimeModelUserInput: TypeAlias = Union[str, RealtimeModelUserInputMessage]\n\"\"\"A user input to be sent to the model.\"\"\"\n# Model messages\n@dataclass\nclass RealtimeModelSendRawMessage:", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendRawMessage", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelSendRawMessage:\n \"\"\"Send a raw message to the model.\"\"\"\n message: RealtimeModelRawClientMessage\n \"\"\"The message to send.\"\"\"\n@dataclass\nclass RealtimeModelSendUserInput:\n \"\"\"Send a user input to the model.\"\"\"\n user_input: RealtimeModelUserInput\n \"\"\"The user input to send.\"\"\"\n@dataclass", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendUserInput", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelSendUserInput:\n \"\"\"Send a user input to the model.\"\"\"\n user_input: RealtimeModelUserInput\n \"\"\"The user input to send.\"\"\"\n@dataclass\nclass RealtimeModelSendAudio:\n \"\"\"Send audio to the model.\"\"\"\n audio: bytes\n commit: bool = False\n@dataclass", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendAudio", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelSendAudio:\n \"\"\"Send audio to the model.\"\"\"\n audio: bytes\n commit: bool = False\n@dataclass\nclass RealtimeModelSendToolOutput:\n \"\"\"Send tool output to the model.\"\"\"\n tool_call: RealtimeModelToolCallEvent\n \"\"\"The tool call to send.\"\"\"\n output: str", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendToolOutput", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelSendToolOutput:\n \"\"\"Send tool output to the model.\"\"\"\n tool_call: RealtimeModelToolCallEvent\n \"\"\"The tool call to send.\"\"\"\n output: str\n \"\"\"The output to send.\"\"\"\n start_response: bool\n \"\"\"Whether to start a response.\"\"\"\n@dataclass\nclass RealtimeModelSendInterrupt:", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendInterrupt", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelSendInterrupt:\n \"\"\"Send an interrupt to the model.\"\"\"\n@dataclass\nclass RealtimeModelSendSessionUpdate:\n \"\"\"Send a session update to the model.\"\"\"\n session_settings: RealtimeSessionModelSettings\n \"\"\"The updated session settings to send.\"\"\"\nRealtimeModelSendEvent: TypeAlias = Union[\n 
RealtimeModelSendRawMessage,\n RealtimeModelSendUserInput,", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "RealtimeModelSendSessionUpdate", - "kind": 6, - "importPath": "src.agents.realtime.model_inputs", - "description": "src.agents.realtime.model_inputs", - "peekOfCode": "class RealtimeModelSendSessionUpdate:\n \"\"\"Send a session update to the model.\"\"\"\n session_settings: RealtimeSessionModelSettings\n \"\"\"The updated session settings to send.\"\"\"\nRealtimeModelSendEvent: TypeAlias = Union[\n RealtimeModelSendRawMessage,\n RealtimeModelSendUserInput,\n RealtimeModelSendAudio,\n RealtimeModelSendToolOutput,\n RealtimeModelSendInterrupt,", - "detail": "src.agents.realtime.model_inputs", - "documentation": {} - }, - { - "label": "OpenAIRealtimeWebSocketModel", - "kind": 6, - "importPath": "src.agents.realtime.openai_realtime", - "description": "src.agents.realtime.openai_realtime", - "peekOfCode": "class OpenAIRealtimeWebSocketModel(RealtimeModel):\n \"\"\"A model that uses OpenAI's WebSocket API.\"\"\"\n def __init__(self) -> None:\n self.model = \"gpt-4o-realtime-preview\" # Default model\n self._websocket: ClientConnection | None = None\n self._websocket_task: asyncio.Task[None] | None = None\n self._listeners: list[RealtimeModelListener] = []\n self._current_item_id: str | None = None\n self._audio_start_time: datetime | None = None\n self._audio_length_ms: float = 0.0", - "detail": "src.agents.realtime.openai_realtime", - "documentation": {} - }, - { - "label": "_ConversionHelper", - "kind": 6, - "importPath": "src.agents.realtime.openai_realtime", - "description": "src.agents.realtime.openai_realtime", - "peekOfCode": "class _ConversionHelper:\n @classmethod\n def conversation_item_to_realtime_message_item(\n cls, item: ConversationItem, previous_item_id: str | None\n ) -> RealtimeMessageItem:\n return TypeAdapter(RealtimeMessageItem).validate_python(\n {\n \"item_id\": item.id or \"\",\n \"previous_item_id\": previous_item_id,\n \"type\": item.type,", - "detail": "src.agents.realtime.openai_realtime", - "documentation": {} - }, - { - "label": "RealtimeRunner", - "kind": 6, - "importPath": "src.agents.realtime.runner", - "description": "src.agents.realtime.runner", - "peekOfCode": "class RealtimeRunner:\n \"\"\"A `RealtimeRunner` is the equivalent of `Runner` for realtime agents. It automatically\n handles multiple turns by maintaining a persistent connection with the underlying model\n layer.\n The session manages the local history copy, executes tools, runs guardrails and facilitates\n handoffs between agents.\n Since this code runs on your server, it uses WebSockets by default. You can optionally create\n your own custom model layer by implementing the `RealtimeModel` interface.\n \"\"\"\n def __init__(", - "detail": "src.agents.realtime.runner", - "documentation": {} - }, - { - "label": "RealtimeSession", - "kind": 6, - "importPath": "src.agents.realtime.session", - "description": "src.agents.realtime.session", - "peekOfCode": "class RealtimeSession(RealtimeModelListener):\n \"\"\"A connection to a realtime model. 
It streams events from the model to you, and allows you to\n send messages and audio to the model.\n Example:\n ```python\n runner = RealtimeRunner(agent)\n async with await runner.run() as session:\n # Send messages\n await session.send_message(\"Hello\")\n await session.send_audio(audio_bytes)", - "detail": "src.agents.realtime.session", - "documentation": {} - }, - { - "label": "trace", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def trace(\n workflow_name: str,\n trace_id: str | None = None,\n group_id: str | None = None,\n metadata: dict[str, Any] | None = None,\n disabled: bool = False,\n) -> Trace:\n \"\"\"\n Create a new trace. The trace will not be started automatically; you should either use\n it as a context manager (`with trace(...):`) or call `trace.start()` + `trace.finish()`", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "get_current_trace", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def get_current_trace() -> Trace | None:\n \"\"\"Returns the currently active trace, if present.\"\"\"\n return get_trace_provider().get_current_trace()\ndef get_current_span() -> Span[Any] | None:\n \"\"\"Returns the currently active span, if present.\"\"\"\n return get_trace_provider().get_current_span()\ndef agent_span(\n name: str,\n handoffs: list[str] | None = None,\n tools: list[str] | None = None,", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "get_current_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def get_current_span() -> Span[Any] | None:\n \"\"\"Returns the currently active span, if present.\"\"\"\n return get_trace_provider().get_current_span()\ndef agent_span(\n name: str,\n handoffs: list[str] | None = None,\n tools: list[str] | None = None,\n output_type: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "agent_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def agent_span(\n name: str,\n handoffs: list[str] | None = None,\n tools: list[str] | None = None,\n output_type: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[AgentSpanData]:\n \"\"\"Create a new agent span. The span will not be started automatically, you should either do", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "function_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def function_span(\n name: str,\n input: str | None = None,\n output: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[FunctionSpanData]:\n \"\"\"Create a new function span. 
The span will not be started automatically, you should either do\n `with function_span() ...` or call `span.start()` + `span.finish()` manually.", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "generation_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def generation_span(\n input: Sequence[Mapping[str, Any]] | None = None,\n output: Sequence[Mapping[str, Any]] | None = None,\n model: str | None = None,\n model_config: Mapping[str, Any] | None = None,\n usage: dict[str, Any] | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[GenerationSpanData]:", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "response_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def response_span(\n response: Response | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[ResponseSpanData]:\n \"\"\"Create a new response span. The span will not be started automatically, you should either do\n `with response_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:\n response: The OpenAI Response object.", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "handoff_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def handoff_span(\n from_agent: str | None = None,\n to_agent: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[HandoffSpanData]:\n \"\"\"Create a new handoff span. The span will not be started automatically, you should either do\n `with handoff_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "custom_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def custom_span(\n name: str,\n data: dict[str, Any] | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[CustomSpanData]:\n \"\"\"Create a new custom span, to which you can add your own metadata. The span will not be\n started automatically, you should either do `with custom_span() ...` or call\n `span.start()` + `span.finish()` manually.", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "guardrail_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def guardrail_span(\n name: str,\n triggered: bool = False,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[GuardrailSpanData]:\n \"\"\"Create a new guardrail span. 
The span will not be started automatically, you should either\n do `with guardrail_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "transcription_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def transcription_span(\n model: str | None = None,\n input: str | None = None,\n input_format: str | None = \"pcm\",\n output: str | None = None,\n model_config: Mapping[str, Any] | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[TranscriptionSpanData]:", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "speech_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def speech_span(\n model: str | None = None,\n input: str | None = None,\n output: str | None = None,\n output_format: str | None = \"pcm\",\n model_config: Mapping[str, Any] | None = None,\n first_content_at: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "speech_group_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def speech_group_span(\n input: str | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[SpeechGroupSpanData]:\n \"\"\"Create a new speech group span. The span will not be started automatically, you should\n either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:\n input: The input text used for the speech request.", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "mcp_tools_span", - "kind": 2, - "importPath": "src.agents.tracing.create", - "description": "src.agents.tracing.create", - "peekOfCode": "def mcp_tools_span(\n server: str | None = None,\n result: list[str] | None = None,\n span_id: str | None = None,\n parent: Trace | Span[Any] | None = None,\n disabled: bool = False,\n) -> Span[MCPListToolsSpanData]:\n \"\"\"Create a new MCP list tools span. 
The span will not be started automatically, you should\n either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually.\n Args:", - "detail": "src.agents.tracing.create", - "documentation": {} - }, - { - "label": "logger", - "kind": 5, - "importPath": "src.agents.tracing.logger", - "description": "src.agents.tracing.logger", - "peekOfCode": "logger = logging.getLogger(\"openai.agents.tracing\")", - "detail": "src.agents.tracing.logger", - "documentation": {} - }, - { - "label": "TracingProcessor", - "kind": 6, - "importPath": "src.agents.tracing.processor_interface", - "description": "src.agents.tracing.processor_interface", - "peekOfCode": "class TracingProcessor(abc.ABC):\n \"\"\"Interface for processing spans.\"\"\"\n @abc.abstractmethod\n def on_trace_start(self, trace: \"Trace\") -> None:\n \"\"\"Called when a trace is started.\n Args:\n trace: The trace that started.\n \"\"\"\n pass\n @abc.abstractmethod", - "detail": "src.agents.tracing.processor_interface", - "documentation": {} - }, - { - "label": "TracingExporter", - "kind": 6, - "importPath": "src.agents.tracing.processor_interface", - "description": "src.agents.tracing.processor_interface", - "peekOfCode": "class TracingExporter(abc.ABC):\n \"\"\"Exports traces and spans. For example, could log them or send them to a backend.\"\"\"\n @abc.abstractmethod\n def export(self, items: list[\"Trace | Span[Any]\"]) -> None:\n \"\"\"Exports a list of traces and spans.\n Args:\n items: The items to export.\n \"\"\"\n pass", - "detail": "src.agents.tracing.processor_interface", - "documentation": {} - }, - { - "label": "ConsoleSpanExporter", - "kind": 6, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "class ConsoleSpanExporter(TracingExporter):\n \"\"\"Prints the traces and spans to the console.\"\"\"\n def export(self, items: list[Trace | Span[Any]]) -> None:\n for item in items:\n if isinstance(item, Trace):\n print(f\"[Exporter] Export trace_id={item.trace_id}, name={item.name}, \")\n else:\n print(f\"[Exporter] Export span: {item.export()}\")\nclass BackendSpanExporter(TracingExporter):\n def __init__(", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "BackendSpanExporter", - "kind": 6, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "class BackendSpanExporter(TracingExporter):\n def __init__(\n self,\n api_key: str | None = None,\n organization: str | None = None,\n project: str | None = None,\n endpoint: str = \"https://api.openai.com/v1/traces/ingest\",\n max_retries: int = 3,\n base_delay: float = 1.0,\n max_delay: float = 30.0,", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "BatchTraceProcessor", - "kind": 6, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "class BatchTraceProcessor(TracingProcessor):\n \"\"\"Some implementation notes:\n 1. Using Queue, which is thread-safe.\n 2. Using a background thread to export spans, to minimize any performance issues.\n 3. 
Spans are stored in memory until they are exported.\n \"\"\"\n def __init__(\n self,\n exporter: TracingExporter,\n max_queue_size: int = 8192,", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "default_exporter", - "kind": 2, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "def default_exporter() -> BackendSpanExporter:\n \"\"\"The default exporter, which exports traces and spans to the backend in batches.\"\"\"\n return _global_exporter\ndef default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "default_processor", - "kind": 2, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "def default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "_global_exporter", - "kind": 5, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "_global_exporter = BackendSpanExporter()\n_global_processor = BatchTraceProcessor(_global_exporter)\ndef default_exporter() -> BackendSpanExporter:\n \"\"\"The default exporter, which exports traces and spans to the backend in batches.\"\"\"\n return _global_exporter\ndef default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "_global_processor", - "kind": 5, - "importPath": "src.agents.tracing.processors", - "description": "src.agents.tracing.processors", - "peekOfCode": "_global_processor = BatchTraceProcessor(_global_exporter)\ndef default_exporter() -> BackendSpanExporter:\n \"\"\"The default exporter, which exports traces and spans to the backend in batches.\"\"\"\n return _global_exporter\ndef default_processor() -> BatchTraceProcessor:\n \"\"\"The default processor, which exports traces and spans to the backend in batches.\"\"\"\n return _global_processor", - "detail": "src.agents.tracing.processors", - "documentation": {} - }, - { - "label": "SynchronousMultiTracingProcessor", - "kind": 6, - "importPath": "src.agents.tracing.provider", - "description": "src.agents.tracing.provider", - "peekOfCode": "class SynchronousMultiTracingProcessor(TracingProcessor):\n \"\"\"\n Forwards all calls to a list of TracingProcessors, in order of registration.\n \"\"\"\n def __init__(self):\n # Using a tuple to avoid race conditions when iterating over processors\n self._processors: tuple[TracingProcessor, ...] 
= ()\n self._lock = threading.Lock()\n def add_tracing_processor(self, tracing_processor: TracingProcessor):\n \"\"\"", - "detail": "src.agents.tracing.provider", - "documentation": {} - }, - { - "label": "TraceProvider", - "kind": 6, - "importPath": "src.agents.tracing.provider", - "description": "src.agents.tracing.provider", - "peekOfCode": "class TraceProvider(ABC):\n \"\"\"Interface for creating traces and spans.\"\"\"\n @abstractmethod\n def register_processor(self, processor: TracingProcessor) -> None:\n \"\"\"Add a processor that will receive all traces and spans.\"\"\"\n @abstractmethod\n def set_processors(self, processors: list[TracingProcessor]) -> None:\n \"\"\"Replace the list of processors with ``processors``.\"\"\"\n @abstractmethod\n def get_current_trace(self) -> Trace | None:", - "detail": "src.agents.tracing.provider", - "documentation": {} - }, - { - "label": "DefaultTraceProvider", - "kind": 6, - "importPath": "src.agents.tracing.provider", - "description": "src.agents.tracing.provider", - "peekOfCode": "class DefaultTraceProvider(TraceProvider):\n def __init__(self) -> None:\n self._multi_processor = SynchronousMultiTracingProcessor()\n self._disabled = os.environ.get(\"OPENAI_AGENTS_DISABLE_TRACING\", \"false\").lower() in (\n \"true\",\n \"1\",\n )\n def register_processor(self, processor: TracingProcessor):\n \"\"\"\n Add a processor to the list of processors. Each processor will receive all traces/spans.", - "detail": "src.agents.tracing.provider", - "documentation": {} - }, - { - "label": "Scope", - "kind": 6, - "importPath": "src.agents.tracing.scope", - "description": "src.agents.tracing.scope", - "peekOfCode": "class Scope:\n \"\"\"\n Manages the current span and trace in the context.\n \"\"\"\n @classmethod\n def get_current_span(cls) -> \"Span[Any] | None\":\n return _current_span.get()\n @classmethod\n def set_current_span(cls, span: \"Span[Any] | None\") -> \"contextvars.Token[Span[Any] | None]\":\n return _current_span.set(span)", - "detail": "src.agents.tracing.scope", - "documentation": {} - }, - { - "label": "set_trace_provider", - "kind": 2, - "importPath": "src.agents.tracing.setup", - "description": "src.agents.tracing.setup", - "peekOfCode": "def set_trace_provider(provider: TraceProvider) -> None:\n \"\"\"Set the global trace provider used by tracing utilities.\"\"\"\n global GLOBAL_TRACE_PROVIDER\n GLOBAL_TRACE_PROVIDER = provider\ndef get_trace_provider() -> TraceProvider:\n \"\"\"Get the global trace provider used by tracing utilities.\"\"\"\n if GLOBAL_TRACE_PROVIDER is None:\n raise RuntimeError(\"Trace provider not set\")\n return GLOBAL_TRACE_PROVIDER", - "detail": "src.agents.tracing.setup", - "documentation": {} - }, - { - "label": "get_trace_provider", - "kind": 2, - "importPath": "src.agents.tracing.setup", - "description": "src.agents.tracing.setup", - "peekOfCode": "def get_trace_provider() -> TraceProvider:\n \"\"\"Get the global trace provider used by tracing utilities.\"\"\"\n if GLOBAL_TRACE_PROVIDER is None:\n raise RuntimeError(\"Trace provider not set\")\n return GLOBAL_TRACE_PROVIDER", - "detail": "src.agents.tracing.setup", - "documentation": {} - }, - { - "label": "SpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class SpanData(abc.ABC):\n \"\"\"\n Represents span data in the trace.\n \"\"\"\n @abc.abstractmethod\n def export(self) -> dict[str, Any]:\n \"\"\"Export the span data as a dictionary.\"\"\"\n pass\n @property\n 
@abc.abstractmethod", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "AgentSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class AgentSpanData(SpanData):\n \"\"\"\n Represents an Agent Span in the trace.\n Includes name, handoffs, tools, and output type.\n \"\"\"\n __slots__ = (\"name\", \"handoffs\", \"tools\", \"output_type\")\n def __init__(\n self,\n name: str,\n handoffs: list[str] | None = None,", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "FunctionSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class FunctionSpanData(SpanData):\n \"\"\"\n Represents a Function Span in the trace.\n Includes input, output and MCP data (if applicable).\n \"\"\"\n __slots__ = (\"name\", \"input\", \"output\", \"mcp_data\")\n def __init__(\n self,\n name: str,\n input: str | None,", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "GenerationSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class GenerationSpanData(SpanData):\n \"\"\"\n Represents a Generation Span in the trace.\n Includes input, output, model, model configuration, and usage.\n \"\"\"\n __slots__ = (\n \"input\",\n \"output\",\n \"model\",\n \"model_config\",", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "ResponseSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class ResponseSpanData(SpanData):\n \"\"\"\n Represents a Response Span in the trace.\n Includes response and input.\n \"\"\"\n __slots__ = (\"response\", \"input\")\n def __init__(\n self,\n response: Response | None = None,\n input: str | list[ResponseInputItemParam] | None = None,", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "HandoffSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class HandoffSpanData(SpanData):\n \"\"\"\n Represents a Handoff Span in the trace.\n Includes source and destination agents.\n \"\"\"\n __slots__ = (\"from_agent\", \"to_agent\")\n def __init__(self, from_agent: str | None, to_agent: str | None):\n self.from_agent = from_agent\n self.to_agent = to_agent\n @property", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "CustomSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class CustomSpanData(SpanData):\n \"\"\"\n Represents a Custom Span in the trace.\n Includes name and data property bag.\n \"\"\"\n __slots__ = (\"name\", \"data\")\n def __init__(self, name: str, data: dict[str, Any]):\n self.name = name\n self.data = data\n @property", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "GuardrailSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class GuardrailSpanData(SpanData):\n \"\"\"\n Represents a Guardrail Span in the trace.\n Includes name and triggered status.\n \"\"\"\n __slots__ = (\"name\", \"triggered\")\n def __init__(self, name: str, triggered: bool = 
False):\n self.name = name\n self.triggered = triggered\n @property", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "TranscriptionSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class TranscriptionSpanData(SpanData):\n \"\"\"\n Represents a Transcription Span in the trace.\n Includes input, output, model, and model configuration.\n \"\"\"\n __slots__ = (\n \"input\",\n \"output\",\n \"model\",\n \"model_config\",", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "SpeechSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class SpeechSpanData(SpanData):\n \"\"\"\n Represents a Speech Span in the trace.\n Includes input, output, model, model configuration, and first content timestamp.\n \"\"\"\n __slots__ = (\"input\", \"output\", \"model\", \"model_config\", \"first_content_at\")\n def __init__(\n self,\n input: str | None = None,\n output: str | None = None,", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "SpeechGroupSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class SpeechGroupSpanData(SpanData):\n \"\"\"\n Represents a Speech Group Span in the trace.\n \"\"\"\n __slots__ = \"input\"\n def __init__(\n self,\n input: str | None = None,\n ):\n self.input = input", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "MCPListToolsSpanData", - "kind": 6, - "importPath": "src.agents.tracing.span_data", - "description": "src.agents.tracing.span_data", - "peekOfCode": "class MCPListToolsSpanData(SpanData):\n \"\"\"\n Represents an MCP List Tools Span in the trace.\n Includes server and result.\n \"\"\"\n __slots__ = (\n \"server\",\n \"result\",\n )\n def __init__(self, server: str | None = None, result: list[str] | None = None):", - "detail": "src.agents.tracing.span_data", - "documentation": {} - }, - { - "label": "SpanError", - "kind": 6, - "importPath": "src.agents.tracing.spans", - "description": "src.agents.tracing.spans", - "peekOfCode": "class SpanError(TypedDict):\n message: str\n data: dict[str, Any] | None\nclass Span(abc.ABC, Generic[TSpanData]):\n @property\n @abc.abstractmethod\n def trace_id(self) -> str:\n pass\n @property\n @abc.abstractmethod", - "detail": "src.agents.tracing.spans", - "documentation": {} - }, - { - "label": "Span", - "kind": 6, - "importPath": "src.agents.tracing.spans", - "description": "src.agents.tracing.spans", - "peekOfCode": "class Span(abc.ABC, Generic[TSpanData]):\n @property\n @abc.abstractmethod\n def trace_id(self) -> str:\n pass\n @property\n @abc.abstractmethod\n def span_id(self) -> str:\n pass\n @property", - "detail": "src.agents.tracing.spans", - "documentation": {} - }, - { - "label": "NoOpSpan", - "kind": 6, - "importPath": "src.agents.tracing.spans", - "description": "src.agents.tracing.spans", - "peekOfCode": "class NoOpSpan(Span[TSpanData]):\n __slots__ = (\"_span_data\", \"_prev_span_token\")\n def __init__(self, span_data: TSpanData):\n self._span_data = span_data\n self._prev_span_token: contextvars.Token[Span[TSpanData] | None] | None = None\n @property\n def trace_id(self) -> str:\n return \"no-op\"\n @property\n def span_id(self) -> str:", - "detail": "src.agents.tracing.spans", - "documentation": {} - }, - { - "label": 
"SpanImpl", - "kind": 6, - "importPath": "src.agents.tracing.spans", - "description": "src.agents.tracing.spans", - "peekOfCode": "class SpanImpl(Span[TSpanData]):\n __slots__ = (\n \"_trace_id\",\n \"_span_id\",\n \"_parent_id\",\n \"_started_at\",\n \"_ended_at\",\n \"_error\",\n \"_prev_span_token\",\n \"_processor\",", - "detail": "src.agents.tracing.spans", - "documentation": {} - }, - { - "label": "TSpanData", - "kind": 5, - "importPath": "src.agents.tracing.spans", - "description": "src.agents.tracing.spans", - "peekOfCode": "TSpanData = TypeVar(\"TSpanData\", bound=SpanData)\nclass SpanError(TypedDict):\n message: str\n data: dict[str, Any] | None\nclass Span(abc.ABC, Generic[TSpanData]):\n @property\n @abc.abstractmethod\n def trace_id(self) -> str:\n pass\n @property", - "detail": "src.agents.tracing.spans", - "documentation": {} - }, - { - "label": "Trace", - "kind": 6, - "importPath": "src.agents.tracing.traces", - "description": "src.agents.tracing.traces", - "peekOfCode": "class Trace:\n \"\"\"\n A trace is the root level object that tracing creates. It represents a logical \"workflow\".\n \"\"\"\n @abc.abstractmethod\n def __enter__(self) -> Trace:\n pass\n @abc.abstractmethod\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass", - "detail": "src.agents.tracing.traces", - "documentation": {} - }, - { - "label": "NoOpTrace", - "kind": 6, - "importPath": "src.agents.tracing.traces", - "description": "src.agents.tracing.traces", - "peekOfCode": "class NoOpTrace(Trace):\n \"\"\"\n A no-op trace that will not be recorded.\n \"\"\"\n def __init__(self):\n self._started = False\n self._prev_context_token: contextvars.Token[Trace | None] | None = None\n def __enter__(self) -> Trace:\n if self._started:\n if not self._prev_context_token:", - "detail": "src.agents.tracing.traces", - "documentation": {} - }, - { - "label": "TraceImpl", - "kind": 6, - "importPath": "src.agents.tracing.traces", - "description": "src.agents.tracing.traces", - "peekOfCode": "class TraceImpl(Trace):\n \"\"\"\n A trace that will be recorded by the tracing library.\n \"\"\"\n __slots__ = (\n \"_name\",\n \"_trace_id\",\n \"group_id\",\n \"metadata\",\n \"_prev_context_token\",", - "detail": "src.agents.tracing.traces", - "documentation": {} - }, - { - "label": "NO_OP_TRACE", - "kind": 5, - "importPath": "src.agents.tracing.traces", - "description": "src.agents.tracing.traces", - "peekOfCode": "NO_OP_TRACE = NoOpTrace()\nclass TraceImpl(Trace):\n \"\"\"\n A trace that will be recorded by the tracing library.\n \"\"\"\n __slots__ = (\n \"_name\",\n \"_trace_id\",\n \"group_id\",\n \"metadata\",", - "detail": "src.agents.tracing.traces", - "documentation": {} - }, - { - "label": "time_iso", - "kind": 2, - "importPath": "src.agents.tracing.util", - "description": "src.agents.tracing.util", - "peekOfCode": "def time_iso() -> str:\n \"\"\"Return the current time in ISO 8601 format.\"\"\"\n return get_trace_provider().time_iso()\ndef gen_trace_id() -> str:\n \"\"\"Generate a new trace ID.\"\"\"\n return get_trace_provider().gen_trace_id()\ndef gen_span_id() -> str:\n \"\"\"Generate a new span ID.\"\"\"\n return get_trace_provider().gen_span_id()\ndef gen_group_id() -> str:", - "detail": "src.agents.tracing.util", - "documentation": {} - }, - { - "label": "gen_trace_id", - "kind": 2, - "importPath": "src.agents.tracing.util", - "description": "src.agents.tracing.util", - "peekOfCode": "def gen_trace_id() -> str:\n \"\"\"Generate a new trace ID.\"\"\"\n return get_trace_provider().gen_trace_id()\ndef gen_span_id() 
-> str:\n \"\"\"Generate a new span ID.\"\"\"\n return get_trace_provider().gen_span_id()\ndef gen_group_id() -> str:\n \"\"\"Generate a new group ID.\"\"\"\n return get_trace_provider().gen_group_id()", - "detail": "src.agents.tracing.util", - "documentation": {} - }, - { - "label": "gen_span_id", - "kind": 2, - "importPath": "src.agents.tracing.util", - "description": "src.agents.tracing.util", - "peekOfCode": "def gen_span_id() -> str:\n \"\"\"Generate a new span ID.\"\"\"\n return get_trace_provider().gen_span_id()\ndef gen_group_id() -> str:\n \"\"\"Generate a new group ID.\"\"\"\n return get_trace_provider().gen_group_id()", - "detail": "src.agents.tracing.util", - "documentation": {} - }, - { - "label": "gen_group_id", - "kind": 2, - "importPath": "src.agents.tracing.util", - "description": "src.agents.tracing.util", - "peekOfCode": "def gen_group_id() -> str:\n \"\"\"Generate a new group ID.\"\"\"\n return get_trace_provider().gen_group_id()", - "detail": "src.agents.tracing.util", - "documentation": {} - }, - { - "label": "attach_error_to_span", - "kind": 2, - "importPath": "src.agents.util._error_tracing", - "description": "src.agents.util._error_tracing", - "peekOfCode": "def attach_error_to_span(span: Span[Any], error: SpanError) -> None:\n span.set_error(error)\ndef attach_error_to_current_span(error: SpanError) -> None:\n span = get_current_span()\n if span:\n attach_error_to_span(span, error)\n else:\n logger.warning(f\"No span to add error {error} to\")", - "detail": "src.agents.util._error_tracing", - "documentation": {} - }, - { - "label": "attach_error_to_current_span", - "kind": 2, - "importPath": "src.agents.util._error_tracing", - "description": "src.agents.util._error_tracing", - "peekOfCode": "def attach_error_to_current_span(error: SpanError) -> None:\n span = get_current_span()\n if span:\n attach_error_to_span(span, error)\n else:\n logger.warning(f\"No span to add error {error} to\")", - "detail": "src.agents.util._error_tracing", - "documentation": {} - }, - { - "label": "validate_json", - "kind": 2, - "importPath": "src.agents.util._json", - "description": "src.agents.util._json", - "peekOfCode": "def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:\n partial_setting: bool | Literal[\"off\", \"on\", \"trailing-strings\"] = (\n \"trailing-strings\" if partial else False\n )\n try:\n validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)\n return validated\n except ValidationError as e:\n attach_error_to_current_span(\n SpanError(", - "detail": "src.agents.util._json", - "documentation": {} - }, - { - "label": "T", - "kind": 5, - "importPath": "src.agents.util._json", - "description": "src.agents.util._json", - "peekOfCode": "T = TypeVar(\"T\")\ndef validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:\n partial_setting: bool | Literal[\"off\", \"on\", \"trailing-strings\"] = (\n \"trailing-strings\" if partial else False\n )\n try:\n validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)\n return validated\n except ValidationError as e:\n attach_error_to_current_span(", - "detail": "src.agents.util._json", - "documentation": {} - }, - { - "label": "pretty_print_result", - "kind": 2, - "importPath": "src.agents.util._pretty_print", - "description": "src.agents.util._pretty_print", - "peekOfCode": "def pretty_print_result(result: \"RunResult\") -> str:\n output = \"RunResult:\"\n output += f'\\n- Last agent: 
Agent(name=\"{result.last_agent.name}\", ...)'\n output += (\n f\"\\n- Final output ({type(result.final_output).__name__}):\\n\"\n f\"{_indent(_final_output_str(result), 2)}\"\n )\n output += f\"\\n- {len(result.new_items)} new item(s)\"\n output += f\"\\n- {len(result.raw_responses)} raw response(s)\"\n output += f\"\\n- {len(result.input_guardrail_results)} input guardrail result(s)\"", - "detail": "src.agents.util._pretty_print", - "documentation": {} - }, - { - "label": "pretty_print_run_error_details", - "kind": 2, - "importPath": "src.agents.util._pretty_print", - "description": "src.agents.util._pretty_print", - "peekOfCode": "def pretty_print_run_error_details(result: \"RunErrorDetails\") -> str:\n output = \"RunErrorDetails:\"\n output += f'\\n- Last agent: Agent(name=\"{result.last_agent.name}\", ...)'\n output += f\"\\n- {len(result.new_items)} new item(s)\"\n output += f\"\\n- {len(result.raw_responses)} raw response(s)\"\n output += f\"\\n- {len(result.input_guardrail_results)} input guardrail result(s)\"\n output += \"\\n(See `RunErrorDetails` for more details)\"\n return output\ndef pretty_print_run_result_streaming(result: \"RunResultStreaming\") -> str:\n output = \"RunResultStreaming:\"", - "detail": "src.agents.util._pretty_print", - "documentation": {} - }, - { - "label": "pretty_print_run_result_streaming", - "kind": 2, - "importPath": "src.agents.util._pretty_print", - "description": "src.agents.util._pretty_print", - "peekOfCode": "def pretty_print_run_result_streaming(result: \"RunResultStreaming\") -> str:\n output = \"RunResultStreaming:\"\n output += f'\\n- Current agent: Agent(name=\"{result.current_agent.name}\", ...)'\n output += f\"\\n- Current turn: {result.current_turn}\"\n output += f\"\\n- Max turns: {result.max_turns}\"\n output += f\"\\n- Is complete: {result.is_complete}\"\n output += (\n f\"\\n- Final output ({type(result.final_output).__name__}):\\n\"\n f\"{_indent(_final_output_str(result), 2)}\"\n )", - "detail": "src.agents.util._pretty_print", - "documentation": {} - }, - { - "label": "transform_string_function_style", - "kind": 2, - "importPath": "src.agents.util._transforms", - "description": "src.agents.util._transforms", - "peekOfCode": "def transform_string_function_style(name: str) -> str:\n # Replace spaces with underscores\n name = name.replace(\" \", \"_\")\n # Replace non-alphanumeric characters with underscores\n name = re.sub(r\"[^a-zA-Z0-9]\", \"_\", name)\n return name.lower()", - "detail": "src.agents.util._transforms", - "documentation": {} - }, - { - "label": "T", - "kind": 5, - "importPath": "src.agents.util._types", - "description": "src.agents.util._types", - "peekOfCode": "T = TypeVar(\"T\")\nMaybeAwaitable = Union[Awaitable[T], T]", - "detail": "src.agents.util._types", - "documentation": {} - }, - { - "label": "MaybeAwaitable", - "kind": 5, - "importPath": "src.agents.util._types", - "description": "src.agents.util._types", - "peekOfCode": "MaybeAwaitable = Union[Awaitable[T], T]", - "detail": "src.agents.util._types", - "documentation": {} - }, - { - "label": "OpenAIVoiceModelProvider", - "kind": 6, - "importPath": "src.agents.voice.models.openai_model_provider", - "description": "src.agents.voice.models.openai_model_provider", - "peekOfCode": "class OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,\n organization: str | None = 
None,\n project: str | None = None,", - "detail": "src.agents.voice.models.openai_model_provider", - "documentation": {} - }, - { - "label": "shared_http_client", - "kind": 2, - "importPath": "src.agents.voice.models.openai_model_provider", - "description": "src.agents.voice.models.openai_model_provider", - "peekOfCode": "def shared_http_client() -> httpx.AsyncClient:\n global _http_client\n if _http_client is None:\n _http_client = DefaultAsyncHttpxClient()\n return _http_client\nDEFAULT_STT_MODEL = \"gpt-4o-transcribe\"\nDEFAULT_TTS_MODEL = \"gpt-4o-mini-tts\"\nclass OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(", - "detail": "src.agents.voice.models.openai_model_provider", - "documentation": {} - }, - { - "label": "DEFAULT_STT_MODEL", - "kind": 5, - "importPath": "src.agents.voice.models.openai_model_provider", - "description": "src.agents.voice.models.openai_model_provider", - "peekOfCode": "DEFAULT_STT_MODEL = \"gpt-4o-transcribe\"\nDEFAULT_TTS_MODEL = \"gpt-4o-mini-tts\"\nclass OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,", - "detail": "src.agents.voice.models.openai_model_provider", - "documentation": {} - }, - { - "label": "DEFAULT_TTS_MODEL", - "kind": 5, - "importPath": "src.agents.voice.models.openai_model_provider", - "description": "src.agents.voice.models.openai_model_provider", - "peekOfCode": "DEFAULT_TTS_MODEL = \"gpt-4o-mini-tts\"\nclass OpenAIVoiceModelProvider(VoiceModelProvider):\n \"\"\"A voice model provider that uses OpenAI models.\"\"\"\n def __init__(\n self,\n *,\n api_key: str | None = None,\n base_url: str | None = None,\n openai_client: AsyncOpenAI | None = None,\n organization: str | None = None,", - "detail": "src.agents.voice.models.openai_model_provider", - "documentation": {} - }, - { - "label": "ErrorSentinel", - "kind": 6, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "class ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n concatenated_audio = np.concatenate(audio_data)\n if concatenated_audio.dtype == np.float32:\n # convert to int16", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "SessionCompleteSentinel", - "kind": 6, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "class SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n concatenated_audio = np.concatenate(audio_data)\n if concatenated_audio.dtype == np.float32:\n # convert to int16\n concatenated_audio = np.clip(concatenated_audio, -1.0, 1.0)\n concatenated_audio = (concatenated_audio * 32767).astype(np.int16)", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "WebsocketDoneSentinel", - "kind": 6, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "class WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n 
concatenated_audio = np.concatenate(audio_data)\n if concatenated_audio.dtype == np.float32:\n # convert to int16\n concatenated_audio = np.clip(concatenated_audio, -1.0, 1.0)\n concatenated_audio = (concatenated_audio * 32767).astype(np.int16)\n audio_bytes = concatenated_audio.tobytes()\n return base64.b64encode(audio_bytes).decode(\"utf-8\")", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "OpenAISTTTranscriptionSession", - "kind": 6, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):\n \"\"\"A transcription session for OpenAI's STT model.\"\"\"\n def __init__(\n self,\n input: StreamedAudioInput,\n client: AsyncOpenAI,\n model: str,\n settings: STTModelSettings,\n trace_include_sensitive_data: bool,\n trace_include_sensitive_audio_data: bool,", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "OpenAISTTModel", - "kind": 6, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "class OpenAISTTModel(STTModel):\n \"\"\"A speech-to-text model for OpenAI.\"\"\"\n def __init__(\n self,\n model: str,\n openai_client: AsyncOpenAI,\n ):\n \"\"\"Create a new OpenAI speech-to-text model.\n Args:\n model: The name of the model to use.", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "EVENT_INACTIVITY_TIMEOUT", - "kind": 5, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "EVENT_INACTIVITY_TIMEOUT = 1000 # Timeout for inactivity in event processing\nSESSION_CREATION_TIMEOUT = 10 # Timeout waiting for session.created event\nSESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event\nDEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "SESSION_CREATION_TIMEOUT", - "kind": 5, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "SESSION_CREATION_TIMEOUT = 10 # Timeout waiting for session.created event\nSESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event\nDEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "SESSION_UPDATE_TIMEOUT", - "kind": 5, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - "peekOfCode": "SESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event\nDEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "DEFAULT_TURN_DETECTION", - "kind": 5, - "importPath": "src.agents.voice.models.openai_stt", - "description": "src.agents.voice.models.openai_stt", - 
"peekOfCode": "DEFAULT_TURN_DETECTION = {\"type\": \"semantic_vad\"}\n@dataclass\nclass ErrorSentinel:\n error: Exception\nclass SessionCompleteSentinel:\n pass\nclass WebsocketDoneSentinel:\n pass\ndef _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str:\n concatenated_audio = np.concatenate(audio_data)", - "detail": "src.agents.voice.models.openai_stt", - "documentation": {} - }, - { - "label": "OpenAITTSModel", - "kind": 6, - "importPath": "src.agents.voice.models.openai_tts", - "description": "src.agents.voice.models.openai_tts", - "peekOfCode": "class OpenAITTSModel(TTSModel):\n \"\"\"A text-to-speech model for OpenAI.\"\"\"\n def __init__(\n self,\n model: str,\n openai_client: AsyncOpenAI,\n ):\n \"\"\"Create a new OpenAI text-to-speech model.\n Args:\n model: The name of the model to use.", - "detail": "src.agents.voice.models.openai_tts", - "documentation": {} - }, - { - "label": "VoiceStreamEventAudio", - "kind": 6, - "importPath": "src.agents.voice.events", - "description": "src.agents.voice.events", - "peekOfCode": "class VoiceStreamEventAudio:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n data: npt.NDArray[np.int16 | np.float32] | None\n \"\"\"The audio data.\"\"\"\n type: Literal[\"voice_stream_event_audio\"] = \"voice_stream_event_audio\"\n \"\"\"The type of event.\"\"\"\n@dataclass\nclass VoiceStreamEventLifecycle:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n event: Literal[\"turn_started\", \"turn_ended\", \"session_ended\"]", - "detail": "src.agents.voice.events", - "documentation": {} - }, - { - "label": "VoiceStreamEventLifecycle", - "kind": 6, - "importPath": "src.agents.voice.events", - "description": "src.agents.voice.events", - "peekOfCode": "class VoiceStreamEventLifecycle:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n event: Literal[\"turn_started\", \"turn_ended\", \"session_ended\"]\n \"\"\"The event that occurred.\"\"\"\n type: Literal[\"voice_stream_event_lifecycle\"] = \"voice_stream_event_lifecycle\"\n \"\"\"The type of event.\"\"\"\n@dataclass\nclass VoiceStreamEventError:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n error: Exception", - "detail": "src.agents.voice.events", - "documentation": {} - }, - { - "label": "VoiceStreamEventError", - "kind": 6, - "importPath": "src.agents.voice.events", - "description": "src.agents.voice.events", - "peekOfCode": "class VoiceStreamEventError:\n \"\"\"Streaming event from the VoicePipeline\"\"\"\n error: Exception\n \"\"\"The error that occurred.\"\"\"\n type: Literal[\"voice_stream_event_error\"] = \"voice_stream_event_error\"\n \"\"\"The type of event.\"\"\"\nVoiceStreamEvent: TypeAlias = Union[\n VoiceStreamEventAudio, VoiceStreamEventLifecycle, VoiceStreamEventError\n]\n\"\"\"An event from the `VoicePipeline`, streamed via `StreamedAudioResult.stream()`.\"\"\"", - "detail": "src.agents.voice.events", - "documentation": {} - }, - { - "label": "STTWebsocketConnectionError", - "kind": 6, - "importPath": "src.agents.voice.exceptions", - "description": "src.agents.voice.exceptions", - "peekOfCode": "class STTWebsocketConnectionError(AgentsException):\n \"\"\"Exception raised when the STT websocket connection fails.\"\"\"\n def __init__(self, message: str):\n self.message = message", - "detail": "src.agents.voice.exceptions", - "documentation": {} - }, - { - "label": "__all__", - "kind": 5, - "importPath": "src.agents.voice.imports", - "description": "src.agents.voice.imports", - "peekOfCode": "__all__ = [\"np\", \"npt\", \"websockets\"]", - "detail": 
"src.agents.voice.imports", - "documentation": {} - }, - { - "label": "AudioInput", - "kind": 6, - "importPath": "src.agents.voice.input", - "description": "src.agents.voice.input", - "peekOfCode": "class AudioInput:\n \"\"\"Static audio to be used as input for the VoicePipeline.\"\"\"\n buffer: npt.NDArray[np.int16 | np.float32]\n \"\"\"\n A buffer containing the audio data for the agent. Must be a numpy array of int16 or float32.\n \"\"\"\n frame_rate: int = DEFAULT_SAMPLE_RATE\n \"\"\"The sample rate of the audio data. Defaults to 24000.\"\"\"\n sample_width: int = 2\n \"\"\"The sample width of the audio data. Defaults to 2.\"\"\"", - "detail": "src.agents.voice.input", - "documentation": {} - }, - { - "label": "StreamedAudioInput", - "kind": 6, - "importPath": "src.agents.voice.input", - "description": "src.agents.voice.input", - "peekOfCode": "class StreamedAudioInput:\n \"\"\"Audio input represented as a stream of audio data. You can pass this to the `VoicePipeline`\n and then push audio data into the queue using the `add_audio` method.\n \"\"\"\n def __init__(self):\n self.queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = asyncio.Queue()\n async def add_audio(self, audio: npt.NDArray[np.int16 | np.float32]):\n \"\"\"Adds more audio data to the stream.\n Args:\n audio: The audio data to add. Must be a numpy array of int16 or float32.", - "detail": "src.agents.voice.input", - "documentation": {} - }, - { - "label": "DEFAULT_SAMPLE_RATE", - "kind": 5, - "importPath": "src.agents.voice.input", - "description": "src.agents.voice.input", - "peekOfCode": "DEFAULT_SAMPLE_RATE = 24000\ndef _buffer_to_audio_file(\n buffer: npt.NDArray[np.int16 | np.float32],\n frame_rate: int = DEFAULT_SAMPLE_RATE,\n sample_width: int = 2,\n channels: int = 1,\n) -> tuple[str, io.BytesIO, str]:\n if buffer.dtype == np.float32:\n # convert to int16\n buffer = np.clip(buffer, -1.0, 1.0)", - "detail": "src.agents.voice.input", - "documentation": {} - }, - { - "label": "TTSModelSettings", - "kind": 6, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "class TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None\n \"\"\"\n The voice to use for the TTS model. If not provided, the default voice for the respective model\n will be used.\n \"\"\"\n buffer_size: int = 120\n \"\"\"The minimal size of the chunks of audio data that are being streamed out.\"\"\"\n dtype: npt.DTypeLike = np.int16", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "TTSModel", - "kind": 6, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "class TTSModel(abc.ABC):\n \"\"\"A text-to-speech model that can convert text into audio output.\"\"\"\n @property\n @abc.abstractmethod\n def model_name(self) -> str:\n \"\"\"The name of the TTS model.\"\"\"\n pass\n @abc.abstractmethod\n def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]:\n \"\"\"Given a text string, produces a stream of audio bytes, in PCM format.", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "StreamedTranscriptionSession", - "kind": 6, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "class StreamedTranscriptionSession(abc.ABC):\n \"\"\"A streamed transcription of audio input.\"\"\"\n @abc.abstractmethod\n def transcribe_turns(self) -> AsyncIterator[str]:\n \"\"\"Yields a stream of text transcriptions. 
Each transcription is a turn in the conversation.\n This method is expected to return only after `close()` is called.\n \"\"\"\n pass\n @abc.abstractmethod\n async def close(self) -> None:", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "STTModelSettings", - "kind": 6, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "class STTModelSettings:\n \"\"\"Settings for a speech-to-text model.\"\"\"\n prompt: str | None = None\n \"\"\"Instructions for the model to follow.\"\"\"\n language: str | None = None\n \"\"\"The language of the audio input.\"\"\"\n temperature: float | None = None\n \"\"\"The temperature of the model.\"\"\"\n turn_detection: dict[str, Any] | None = None\n \"\"\"The turn detection settings for the model when using streamed audio input.\"\"\"", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "STTModel", - "kind": 6, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "class STTModel(abc.ABC):\n \"\"\"A speech-to-text model that can convert audio input into text.\"\"\"\n @property\n @abc.abstractmethod\n def model_name(self) -> str:\n \"\"\"The name of the STT model.\"\"\"\n pass\n @abc.abstractmethod\n async def transcribe(\n self,", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "VoiceModelProvider", - "kind": 6, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "class VoiceModelProvider(abc.ABC):\n \"\"\"The base interface for a voice model provider.\n A model provider is responsible for creating speech-to-text and text-to-speech models, given a\n name.\n \"\"\"\n @abc.abstractmethod\n def get_stt_model(self, model_name: str | None) -> STTModel:\n \"\"\"Get a speech-to-text model by name.\n Args:\n model_name: The name of the model to get.", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "DEFAULT_TTS_INSTRUCTIONS", - "kind": 5, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "DEFAULT_TTS_INSTRUCTIONS = (\n \"You will receive partial sentences. Do not complete the sentence, just read out the text.\"\n)\nDEFAULT_TTS_BUFFER_SIZE = 120\nTTSVoice = Literal[\"alloy\", \"ash\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\"]\n\"\"\"Exportable type for the TTSModelSettings voice enum\"\"\"\n@dataclass\nclass TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "DEFAULT_TTS_BUFFER_SIZE", - "kind": 5, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "DEFAULT_TTS_BUFFER_SIZE = 120\nTTSVoice = Literal[\"alloy\", \"ash\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\"]\n\"\"\"Exportable type for the TTSModelSettings voice enum\"\"\"\n@dataclass\nclass TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None\n \"\"\"\n The voice to use for the TTS model. 
If not provided, the default voice for the respective model\n will be used.", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "TTSVoice", - "kind": 5, - "importPath": "src.agents.voice.model", - "description": "src.agents.voice.model", - "peekOfCode": "TTSVoice = Literal[\"alloy\", \"ash\", \"coral\", \"echo\", \"fable\", \"onyx\", \"nova\", \"sage\", \"shimmer\"]\n\"\"\"Exportable type for the TTSModelSettings voice enum\"\"\"\n@dataclass\nclass TTSModelSettings:\n \"\"\"Settings for a TTS model.\"\"\"\n voice: TTSVoice | None = None\n \"\"\"\n The voice to use for the TTS model. If not provided, the default voice for the respective model\n will be used.\n \"\"\"", - "detail": "src.agents.voice.model", - "documentation": {} - }, - { - "label": "VoicePipeline", - "kind": 6, - "importPath": "src.agents.voice.pipeline", - "description": "src.agents.voice.pipeline", - "peekOfCode": "class VoicePipeline:\n \"\"\"An opinionated voice agent pipeline. It works in three steps:\n 1. Transcribe audio input into text.\n 2. Run the provided `workflow`, which produces a sequence of text responses.\n 3. Convert the text responses into streaming audio output.\n \"\"\"\n def __init__(\n self,\n *,\n workflow: VoiceWorkflowBase,", - "detail": "src.agents.voice.pipeline", - "documentation": {} - }, - { - "label": "VoicePipelineConfig", - "kind": 6, - "importPath": "src.agents.voice.pipeline_config", - "description": "src.agents.voice.pipeline_config", - "peekOfCode": "class VoicePipelineConfig:\n \"\"\"Configuration for a `VoicePipeline`.\"\"\"\n model_provider: VoiceModelProvider = field(default_factory=OpenAIVoiceModelProvider)\n \"\"\"The voice model provider to use for the pipeline. Defaults to OpenAI.\"\"\"\n tracing_disabled: bool = False\n \"\"\"Whether to disable tracing of the pipeline. Defaults to `False`.\"\"\"\n trace_include_sensitive_data: bool = True\n \"\"\"Whether to include sensitive data in traces. Defaults to `True`. This is specifically for the\n voice pipeline, and not for anything that goes on inside your Workflow.\"\"\"\n trace_include_sensitive_audio_data: bool = True", - "detail": "src.agents.voice.pipeline_config", - "documentation": {} - }, - { - "label": "StreamedAudioResult", - "kind": 6, - "importPath": "src.agents.voice.result", - "description": "src.agents.voice.result", - "peekOfCode": "class StreamedAudioResult:\n \"\"\"The output of a `VoicePipeline`. 
Streams events and audio data as they're generated.\"\"\"\n def __init__(\n self,\n tts_model: TTSModel,\n tts_settings: TTSModelSettings,\n voice_pipeline_config: VoicePipelineConfig,\n ):\n \"\"\"Create a new `StreamedAudioResult` instance.\n Args:", - "detail": "src.agents.voice.result", - "documentation": {} - }, - { - "label": "get_sentence_based_splitter", - "kind": 2, - "importPath": "src.agents.voice.utils", - "description": "src.agents.voice.utils", - "peekOfCode": "def get_sentence_based_splitter(\n min_sentence_length: int = 20,\n) -> Callable[[str], tuple[str, str]]:\n \"\"\"Returns a function that splits text into chunks based on sentence boundaries.\n Args:\n min_sentence_length: The minimum length of a sentence to be included in a chunk.\n Returns:\n A function that splits text into chunks based on sentence boundaries.\n \"\"\"\n def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]:", - "detail": "src.agents.voice.utils", - "documentation": {} - }, - { - "label": "VoiceWorkflowBase", - "kind": 6, - "importPath": "src.agents.voice.workflow", - "description": "src.agents.voice.workflow", - "peekOfCode": "class VoiceWorkflowBase(abc.ABC):\n \"\"\"\n A base class for a voice workflow. You must implement the `run` method. A \"workflow\" is any\n code you want, that receives a transcription and yields text that will be turned into speech\n by a text-to-speech model.\n In most cases, you'll create `Agent`s and use `Runner.run_streamed()` to run them, returning\n some or all of the text events from the stream. You can use the `VoiceWorkflowHelper` class to\n help with extracting text events from the stream.\n If you have a simple workflow that has a single starting agent and no custom logic, you can\n use `SingleAgentVoiceWorkflow` directly.", - "detail": "src.agents.voice.workflow", - "documentation": {} - }, - { - "label": "VoiceWorkflowHelper", - "kind": 6, - "importPath": "src.agents.voice.workflow", - "description": "src.agents.voice.workflow", - "peekOfCode": "class VoiceWorkflowHelper:\n @classmethod\n async def stream_text_from(cls, result: RunResultStreaming) -> AsyncIterator[str]:\n \"\"\"Wraps a `RunResultStreaming` object and yields text events from the stream.\"\"\"\n async for event in result.stream_events():\n if (\n event.type == \"raw_response_event\"\n and event.data.type == \"response.output_text.delta\"\n ):\n yield event.data.delta", - "detail": "src.agents.voice.workflow", - "documentation": {} - }, - { - "label": "SingleAgentWorkflowCallbacks", - "kind": 6, - "importPath": "src.agents.voice.workflow", - "description": "src.agents.voice.workflow", - "peekOfCode": "class SingleAgentWorkflowCallbacks:\n def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:\n \"\"\"Called when the workflow is run.\"\"\"\n pass\nclass SingleAgentVoiceWorkflow(VoiceWorkflowBase):\n \"\"\"A simple voice workflow that runs a single agent. Each transcription and result is added to\n the input history.\n For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic,\n custom configs), subclass `VoiceWorkflowBase` and implement your own logic.\n \"\"\"", - "detail": "src.agents.voice.workflow", - "documentation": {} - }, - { - "label": "SingleAgentVoiceWorkflow", - "kind": 6, - "importPath": "src.agents.voice.workflow", - "description": "src.agents.voice.workflow", - "peekOfCode": "class SingleAgentVoiceWorkflow(VoiceWorkflowBase):\n \"\"\"A simple voice workflow that runs a single agent. 
Each transcription and result is added to\n the input history.\n For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic,\n custom configs), subclass `VoiceWorkflowBase` and implement your own logic.\n \"\"\"\n def __init__(self, agent: Agent[Any], callbacks: SingleAgentWorkflowCallbacks | None = None):\n \"\"\"Create a new single agent voice workflow.\n Args:\n agent: The agent to run.", - "detail": "src.agents.voice.workflow", - "documentation": {} - }, - { - "label": "set_default_openai_key", - "kind": 2, - "importPath": "src.agents._config", - "description": "src.agents._config", - "peekOfCode": "def set_default_openai_key(key: str, use_for_tracing: bool) -> None:\n _openai_shared.set_default_openai_key(key)\n if use_for_tracing:\n set_tracing_export_api_key(key)\ndef set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:\n _openai_shared.set_default_openai_client(client)\n if use_for_tracing:\n set_tracing_export_api_key(client.api_key)\ndef set_default_openai_api(api: Literal[\"chat_completions\", \"responses\"]) -> None:\n if api == \"chat_completions\":", - "detail": "src.agents._config", - "documentation": {} - }, - { - "label": "set_default_openai_client", - "kind": 2, - "importPath": "src.agents._config", - "description": "src.agents._config", - "peekOfCode": "def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:\n _openai_shared.set_default_openai_client(client)\n if use_for_tracing:\n set_tracing_export_api_key(client.api_key)\ndef set_default_openai_api(api: Literal[\"chat_completions\", \"responses\"]) -> None:\n if api == \"chat_completions\":\n _openai_shared.set_use_responses_by_default(False)\n else:\n _openai_shared.set_use_responses_by_default(True)", - "detail": "src.agents._config", - "documentation": {} - }, - { - "label": "set_default_openai_api", - "kind": 2, - "importPath": "src.agents._config", - "description": "src.agents._config", - "peekOfCode": "def set_default_openai_api(api: Literal[\"chat_completions\", \"responses\"]) -> None:\n if api == \"chat_completions\":\n _openai_shared.set_use_responses_by_default(False)\n else:\n _openai_shared.set_use_responses_by_default(True)", - "detail": "src.agents._config", - "documentation": {} - }, - { - "label": "DONT_LOG_MODEL_DATA", - "kind": 5, - "importPath": "src.agents._debug", - "description": "src.agents._debug", - "peekOfCode": "DONT_LOG_MODEL_DATA = _debug_flag_enabled(\"OPENAI_AGENTS_DONT_LOG_MODEL_DATA\")\n\"\"\"By default we don't log LLM inputs/outputs, to prevent exposing sensitive information. Set this\nflag to enable logging them.\n\"\"\"\nDONT_LOG_TOOL_DATA = _debug_flag_enabled(\"OPENAI_AGENTS_DONT_LOG_TOOL_DATA\")\n\"\"\"By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. Set\nthis flag to enable logging them.\n\"\"\"", - "detail": "src.agents._debug", - "documentation": {} - }, - { - "label": "DONT_LOG_TOOL_DATA", - "kind": 5, - "importPath": "src.agents._debug", - "description": "src.agents._debug", - "peekOfCode": "DONT_LOG_TOOL_DATA = _debug_flag_enabled(\"OPENAI_AGENTS_DONT_LOG_TOOL_DATA\")\n\"\"\"By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. 
Set\nthis flag to enable logging them.\n\"\"\"", - "detail": "src.agents._debug", - "documentation": {} - }, - { - "label": "QueueCompleteSentinel", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class QueueCompleteSentinel:\n pass\nQUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel()\n_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@dataclass\nclass AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "AgentToolUseTracker", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)\n if existing_data:\n existing_data[1].extend(tool_names)\n else:\n self.agent_to_tools.append((agent, tool_names))\n def has_used_tools(self, agent: Agent[Any]) -> bool:", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "ToolRunHandoff", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class ToolRunHandoff:\n handoff: Handoff\n tool_call: ResponseFunctionToolCall\n@dataclass\nclass ToolRunMCPApprovalRequest:\n request_item: McpApprovalRequest\n mcp_tool: HostedMCPTool\n@dataclass\nclass ToolRunLocalShellCall:\n tool_call: LocalShellCall", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "ToolRunMCPApprovalRequest", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class ToolRunMCPApprovalRequest:\n request_item: McpApprovalRequest\n mcp_tool: HostedMCPTool\n@dataclass\nclass ToolRunLocalShellCall:\n tool_call: LocalShellCall\n local_shell_tool: LocalShellTool\n@dataclass\nclass ProcessedResponse:\n new_items: list[RunItem]", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "ToolRunLocalShellCall", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class ToolRunLocalShellCall:\n tool_call: LocalShellCall\n local_shell_tool: LocalShellTool\n@dataclass\nclass ProcessedResponse:\n new_items: list[RunItem]\n handoffs: list[ToolRunHandoff]\n functions: list[ToolRunFunction]\n computer_actions: list[ToolRunComputerAction]\n local_shell_calls: list[ToolRunLocalShellCall]", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "ProcessedResponse", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class ProcessedResponse:\n new_items: list[RunItem]\n handoffs: list[ToolRunHandoff]\n functions: list[ToolRunFunction]\n computer_actions: list[ToolRunComputerAction]\n local_shell_calls: list[ToolRunLocalShellCall]\n tools_used: list[str] # Names of all tools used, including 
hosted tools\n mcp_approval_requests: list[ToolRunMCPApprovalRequest] # Only requests with callbacks\n def has_tools_or_approvals_to_run(self) -> bool:\n # Handoffs, functions and computer actions need local processing", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "NextStepHandoff", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class NextStepHandoff:\n new_agent: Agent[Any]\n@dataclass\nclass NextStepFinalOutput:\n output: Any\n@dataclass\nclass NextStepRunAgain:\n pass\n@dataclass\nclass SingleStepResult:", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "NextStepFinalOutput", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class NextStepFinalOutput:\n output: Any\n@dataclass\nclass NextStepRunAgain:\n pass\n@dataclass\nclass SingleStepResult:\n original_input: str | list[TResponseInputItem]\n \"\"\"The input items i.e. the items before run() was called. May be mutated by handoff input\n filters.\"\"\"", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "NextStepRunAgain", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class NextStepRunAgain:\n pass\n@dataclass\nclass SingleStepResult:\n original_input: str | list[TResponseInputItem]\n \"\"\"The input items i.e. the items before run() was called. May be mutated by handoff input\n filters.\"\"\"\n model_response: ModelResponse\n \"\"\"The model response for the current step.\"\"\"\n pre_step_items: list[RunItem]", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "SingleStepResult", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class SingleStepResult:\n original_input: str | list[TResponseInputItem]\n \"\"\"The input items i.e. the items before run() was called. 
May be mutated by handoff input\n filters.\"\"\"\n model_response: ModelResponse\n \"\"\"The model response for the current step.\"\"\"\n pre_step_items: list[RunItem]\n \"\"\"Items generated before the current step.\"\"\"\n new_step_items: list[RunItem]\n \"\"\"Items generated during this current step.\"\"\"", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "RunImpl", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class RunImpl:\n @classmethod\n async def execute_tools_and_side_effects(\n cls,\n *,\n agent: Agent[TContext],\n # The original input to the Runner\n original_input: str | list[TResponseInputItem],\n # Everything generated by Runner since the original input, but before the current step\n pre_step_items: list[RunItem],", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "TraceCtxManager", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class TraceCtxManager:\n \"\"\"Creates a trace only if there is no current trace, and manages the trace lifecycle.\"\"\"\n def __init__(\n self,\n workflow_name: str,\n trace_id: str | None,\n group_id: str | None,\n metadata: dict[str, Any] | None,\n disabled: bool,\n ):", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "ComputerAction", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class ComputerAction:\n @classmethod\n async def execute(\n cls,\n *,\n agent: Agent[TContext],\n action: ToolRunComputerAction,\n hooks: RunHooks[TContext],\n context_wrapper: RunContextWrapper[TContext],\n config: RunConfig,", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "LocalShellAction", - "kind": 6, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "class LocalShellAction:\n @classmethod\n async def execute(\n cls,\n *,\n agent: Agent[TContext],\n call: ToolRunLocalShellCall,\n hooks: RunHooks[TContext],\n context_wrapper: RunContextWrapper[TContext],\n config: RunConfig,", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "get_model_tracing_impl", - "kind": 2, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "def get_model_tracing_impl(\n tracing_disabled: bool, trace_include_sensitive_data: bool\n) -> ModelTracing:\n if tracing_disabled:\n return ModelTracing.DISABLED\n elif trace_include_sensitive_data:\n return ModelTracing.ENABLED\n else:\n return ModelTracing.ENABLED_WITHOUT_DATA\nclass RunImpl:", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "QUEUE_COMPLETE_SENTINEL", - "kind": 5, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "QUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel()\n_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@dataclass\nclass AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). 
Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)\n if existing_data:\n existing_data[1].extend(tool_names)", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "_NOT_FINAL_OUTPUT", - "kind": 5, - "importPath": "src.agents._run_impl", - "description": "src.agents._run_impl", - "peekOfCode": "_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None)\n@dataclass\nclass AgentToolUseTracker:\n agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list)\n \"\"\"Tuple of (agent, list of tools used). Can't use a dict because agents aren't hashable.\"\"\"\n def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None:\n existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None)\n if existing_data:\n existing_data[1].extend(tool_names)\n else:", - "detail": "src.agents._run_impl", - "documentation": {} - }, - { - "label": "ToolsToFinalOutputResult", - "kind": 6, - "importPath": "src.agents.agent", - "description": "src.agents.agent", - "peekOfCode": "class ToolsToFinalOutputResult:\n is_final_output: bool\n \"\"\"Whether this is the final output. If False, the LLM will run again and receive the tool call\n output.\n \"\"\"\n final_output: Any | None = None\n \"\"\"The final output. Can be None if `is_final_output` is False, otherwise must match the\n `output_type` of the agent.\n \"\"\"\nToolsToFinalOutputFunction: TypeAlias = Callable[", - "detail": "src.agents.agent", - "documentation": {} - }, - { - "label": "StopAtTools", - "kind": 6, - "importPath": "src.agents.agent", - "description": "src.agents.agent", - "peekOfCode": "class StopAtTools(TypedDict):\n stop_at_tool_names: list[str]\n \"\"\"A list of tool names, any of which will stop the agent from running further.\"\"\"\nclass MCPConfig(TypedDict):\n \"\"\"Configuration for MCP servers.\"\"\"\n convert_schemas_to_strict: NotRequired[bool]\n \"\"\"If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a\n best-effort conversion, so some schemas may not be convertible. Defaults to False.\n \"\"\"\n@dataclass", - "detail": "src.agents.agent", - "documentation": {} - }, - { - "label": "MCPConfig", - "kind": 6, - "importPath": "src.agents.agent", - "description": "src.agents.agent", - "peekOfCode": "class MCPConfig(TypedDict):\n \"\"\"Configuration for MCP servers.\"\"\"\n convert_schemas_to_strict: NotRequired[bool]\n \"\"\"If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a\n best-effort conversion, so some schemas may not be convertible. Defaults to False.\n \"\"\"\n@dataclass\nclass AgentBase(Generic[TContext]):\n \"\"\"Base class for `Agent` and `RealtimeAgent`.\"\"\"\n name: str", - "detail": "src.agents.agent", - "documentation": {} - }, - { - "label": "AgentBase", - "kind": 6, - "importPath": "src.agents.agent", - "description": "src.agents.agent", - "peekOfCode": "class AgentBase(Generic[TContext]):\n \"\"\"Base class for `Agent` and `RealtimeAgent`.\"\"\"\n name: str\n \"\"\"The name of the agent.\"\"\"\n handoff_description: str | None = None\n \"\"\"A description of the agent. 
This is used when the agent is used as a handoff, so that an\n LLM knows what it does and when to invoke it.\n \"\"\"\n tools: list[Tool] = field(default_factory=list)\n \"\"\"A list of tools that the agent can use.\"\"\"", - "detail": "src.agents.agent", - "documentation": {} - }, - { - "label": "Agent", - "kind": 6, - "importPath": "src.agents.agent", - "description": "src.agents.agent", - "peekOfCode": "class Agent(AgentBase, Generic[TContext]):\n \"\"\"An agent is an AI model configured with instructions, tools, guardrails, handoffs and more.\n We strongly recommend passing `instructions`, which is the \"system prompt\" for the agent. In\n addition, you can pass `handoff_description`, which is a human-readable description of the\n agent, used when the agent is used inside tools/handoffs.\n Agents are generic on the context type. The context is a (mutable) object you create. It is\n passed to tool functions, handoffs, guardrails, etc.\n See `AgentBase` for base parameters that are shared with `RealtimeAgent`s.\n \"\"\"\n instructions: (", - "detail": "src.agents.agent", - "documentation": {} - }, - { - "label": "AgentOutputSchemaBase", - "kind": 6, - "importPath": "src.agents.agent_output", - "description": "src.agents.agent_output", - "peekOfCode": "class AgentOutputSchemaBase(abc.ABC):\n \"\"\"An object that captures the JSON schema of the output, as well as validating/parsing JSON\n produced by the LLM into the output type.\n \"\"\"\n @abc.abstractmethod\n def is_plain_text(self) -> bool:\n \"\"\"Whether the output type is plain text (versus a JSON object).\"\"\"\n pass\n @abc.abstractmethod\n def name(self) -> str:", - "detail": "src.agents.agent_output", - "documentation": {} - }, - { - "label": "AgentOutputSchema", - "kind": 6, - "importPath": "src.agents.agent_output", - "description": "src.agents.agent_output", - "peekOfCode": "class AgentOutputSchema(AgentOutputSchemaBase):\n \"\"\"An object that captures the JSON schema of the output, as well as validating/parsing JSON\n produced by the LLM into the output type.\n \"\"\"\n output_type: type[Any]\n \"\"\"The type of the output.\"\"\"\n _type_adapter: TypeAdapter[Any]\n \"\"\"A type adapter that wraps the output type, so that we can validate JSON.\"\"\"\n _is_wrapped: bool\n \"\"\"Whether the output type is wrapped in a dictionary. This is generally done if the base", - "detail": "src.agents.agent_output", - "documentation": {} - }, - { - "label": "_WRAPPER_DICT_KEY", - "kind": 5, - "importPath": "src.agents.agent_output", - "description": "src.agents.agent_output", - "peekOfCode": "_WRAPPER_DICT_KEY = \"response\"\nclass AgentOutputSchemaBase(abc.ABC):\n \"\"\"An object that captures the JSON schema of the output, as well as validating/parsing JSON\n produced by the LLM into the output type.\n \"\"\"\n @abc.abstractmethod\n def is_plain_text(self) -> bool:\n \"\"\"Whether the output type is plain text (versus a JSON object).\"\"\"\n pass\n @abc.abstractmethod", - "detail": "src.agents.agent_output", - "documentation": {} - }, - { - "label": "Computer", - "kind": 6, - "importPath": "src.agents.computer", - "description": "src.agents.computer", - "peekOfCode": "class Computer(abc.ABC):\n \"\"\"A computer implemented with sync operations. 
The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property\n @abc.abstractmethod\n def dimensions(self) -> tuple[int, int]:", - "detail": "src.agents.computer", - "documentation": {} - }, - { - "label": "AsyncComputer", - "kind": 6, - "importPath": "src.agents.computer", - "description": "src.agents.computer", - "peekOfCode": "class AsyncComputer(abc.ABC):\n \"\"\"A computer implemented with async operations. The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property\n @abc.abstractmethod\n def dimensions(self) -> tuple[int, int]:", - "detail": "src.agents.computer", - "documentation": {} - }, - { - "label": "Environment", - "kind": 5, - "importPath": "src.agents.computer", - "description": "src.agents.computer", - "peekOfCode": "Environment = Literal[\"mac\", \"windows\", \"ubuntu\", \"browser\"]\nButton = Literal[\"left\", \"right\", \"wheel\", \"back\", \"forward\"]\nclass Computer(abc.ABC):\n \"\"\"A computer implemented with sync operations. The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property", - "detail": "src.agents.computer", - "documentation": {} - }, - { - "label": "Button", - "kind": 5, - "importPath": "src.agents.computer", - "description": "src.agents.computer", - "peekOfCode": "Button = Literal[\"left\", \"right\", \"wheel\", \"back\", \"forward\"]\nclass Computer(abc.ABC):\n \"\"\"A computer implemented with sync operations. The Computer interface abstracts the\n operations needed to control a computer or browser.\"\"\"\n @property\n @abc.abstractmethod\n def environment(self) -> Environment:\n pass\n @property\n @abc.abstractmethod", - "detail": "src.agents.computer", - "documentation": {} - }, - { - "label": "RunErrorDetails", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class RunErrorDetails:\n \"\"\"Data collected from an agent run when an exception occurs.\"\"\"\n input: str | list[TResponseInputItem]\n new_items: list[RunItem]\n raw_responses: list[ModelResponse]\n last_agent: Agent[Any]\n context_wrapper: RunContextWrapper[Any]\n input_guardrail_results: list[InputGuardrailResult]\n output_guardrail_results: list[OutputGuardrailResult]\n def __str__(self) -> str:", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "AgentsException", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class AgentsException(Exception):\n \"\"\"Base class for all exceptions in the Agents SDK.\"\"\"\n run_data: RunErrorDetails | None\n def __init__(self, *args: object) -> None:\n super().__init__(*args)\n self.run_data = None\nclass MaxTurnsExceeded(AgentsException):\n \"\"\"Exception raised when the maximum number of turns is exceeded.\"\"\"\n message: str\n def __init__(self, message: str):", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "MaxTurnsExceeded", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class MaxTurnsExceeded(AgentsException):\n \"\"\"Exception raised when the maximum number of turns is exceeded.\"\"\"\n message: str\n def 
__init__(self, message: str):\n self.message = message\n super().__init__(message)\nclass ModelBehaviorError(AgentsException):\n \"\"\"Exception raised when the model does something unexpected, e.g. calling a tool that doesn't\n exist, or providing malformed JSON.\n \"\"\"", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "ModelBehaviorError", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class ModelBehaviorError(AgentsException):\n \"\"\"Exception raised when the model does something unexpected, e.g. calling a tool that doesn't\n exist, or providing malformed JSON.\n \"\"\"\n message: str\n def __init__(self, message: str):\n self.message = message\n super().__init__(message)\nclass UserError(AgentsException):\n \"\"\"Exception raised when the user makes an error using the SDK.\"\"\"", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "UserError", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class UserError(AgentsException):\n \"\"\"Exception raised when the user makes an error using the SDK.\"\"\"\n message: str\n def __init__(self, message: str):\n self.message = message\n super().__init__(message)\nclass InputGuardrailTripwireTriggered(AgentsException):\n \"\"\"Exception raised when a guardrail tripwire is triggered.\"\"\"\n guardrail_result: InputGuardrailResult\n \"\"\"The result data of the guardrail that was triggered.\"\"\"", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "InputGuardrailTripwireTriggered", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class InputGuardrailTripwireTriggered(AgentsException):\n \"\"\"Exception raised when a guardrail tripwire is triggered.\"\"\"\n guardrail_result: InputGuardrailResult\n \"\"\"The result data of the guardrail that was triggered.\"\"\"\n def __init__(self, guardrail_result: InputGuardrailResult):\n self.guardrail_result = guardrail_result\n super().__init__(\n f\"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire\"\n )\nclass OutputGuardrailTripwireTriggered(AgentsException):", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "OutputGuardrailTripwireTriggered", - "kind": 6, - "importPath": "src.agents.exceptions", - "description": "src.agents.exceptions", - "peekOfCode": "class OutputGuardrailTripwireTriggered(AgentsException):\n \"\"\"Exception raised when a guardrail tripwire is triggered.\"\"\"\n guardrail_result: OutputGuardrailResult\n \"\"\"The result data of the guardrail that was triggered.\"\"\"\n def __init__(self, guardrail_result: OutputGuardrailResult):\n self.guardrail_result = guardrail_result\n super().__init__(\n f\"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire\"\n )", - "detail": "src.agents.exceptions", - "documentation": {} - }, - { - "label": "FuncSchema", - "kind": 6, - "importPath": "src.agents.function_schema", - "description": "src.agents.function_schema", - "peekOfCode": "class FuncSchema:\n \"\"\"\n Captures the schema for a python function, in preparation for sending it to an LLM as a tool.\n \"\"\"\n name: str\n \"\"\"The name of the function.\"\"\"\n description: str | None\n \"\"\"The description of the function.\"\"\"\n params_pydantic_model: type[BaseModel]\n \"\"\"A Pydantic model that represents the function's 
parameters.\"\"\"", - "detail": "src.agents.function_schema", - "documentation": {} - }, - { - "label": "FuncDocumentation", - "kind": 6, - "importPath": "src.agents.function_schema", - "description": "src.agents.function_schema", - "peekOfCode": "class FuncDocumentation:\n \"\"\"Contains metadata about a python function, extracted from its docstring.\"\"\"\n name: str\n \"\"\"The name of the function, via `__name__`.\"\"\"\n description: str | None\n \"\"\"The description of the function, derived from the docstring.\"\"\"\n param_descriptions: dict[str, str] | None\n \"\"\"The parameter descriptions of the function, derived from the docstring.\"\"\"\nDocstringStyle = Literal[\"google\", \"numpy\", \"sphinx\"]\n# As of Feb 2025, the automatic style detection in griffe is an Insiders feature. This", - "detail": "src.agents.function_schema", - "documentation": {} - }, - { - "label": "generate_func_documentation", - "kind": 2, - "importPath": "src.agents.function_schema", - "description": "src.agents.function_schema", - "peekOfCode": "def generate_func_documentation(\n func: Callable[..., Any], style: DocstringStyle | None = None\n) -> FuncDocumentation:\n \"\"\"\n Extracts metadata from a function docstring, in preparation for sending it to an LLM as a tool.\n Args:\n func: The function to extract documentation from.\n style: The style of the docstring to use for parsing. If not provided, we will attempt to\n auto-detect the style.\n Returns:", - "detail": "src.agents.function_schema", - "documentation": {} - }, - { - "label": "function_schema", - "kind": 2, - "importPath": "src.agents.function_schema", - "description": "src.agents.function_schema", - "peekOfCode": "def function_schema(\n func: Callable[..., Any],\n docstring_style: DocstringStyle | None = None,\n name_override: str | None = None,\n description_override: str | None = None,\n use_docstring_info: bool = True,\n strict_json_schema: bool = True,\n) -> FuncSchema:\n \"\"\"\n Given a python function, extracts a `FuncSchema` from it, capturing the name, description,", - "detail": "src.agents.function_schema", - "documentation": {} - }, - { - "label": "DocstringStyle", - "kind": 5, - "importPath": "src.agents.function_schema", - "description": "src.agents.function_schema", - "peekOfCode": "DocstringStyle = Literal[\"google\", \"numpy\", \"sphinx\"]\n# As of Feb 2025, the automatic style detection in griffe is an Insiders feature. This\n# code approximates it.\ndef _detect_docstring_style(doc: str) -> DocstringStyle:\n scores: dict[DocstringStyle, int] = {\"sphinx\": 0, \"numpy\": 0, \"google\": 0}\n # Sphinx style detection: look for :param, :type, :return:, and :rtype:\n sphinx_patterns = [r\"^:param\\s\", r\"^:type\\s\", r\"^:return:\", r\"^:rtype:\"]\n for pattern in sphinx_patterns:\n if re.search(pattern, doc, re.MULTILINE):\n scores[\"sphinx\"] += 1", - "detail": "src.agents.function_schema", - "documentation": {} - }, - { - "label": "GuardrailFunctionOutput", - "kind": 6, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "class GuardrailFunctionOutput:\n \"\"\"The output of a guardrail function.\"\"\"\n output_info: Any\n \"\"\"\n Optional information about the guardrail's output. For example, the guardrail could include\n information about the checks it performed and granular results.\n \"\"\"\n tripwire_triggered: bool\n \"\"\"\n Whether the tripwire was triggered. 
If triggered, the agent's execution will be halted.", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "InputGuardrailResult", - "kind": 6, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "class InputGuardrailResult:\n \"\"\"The result of a guardrail run.\"\"\"\n guardrail: InputGuardrail[Any]\n \"\"\"\n The guardrail that was run.\n \"\"\"\n output: GuardrailFunctionOutput\n \"\"\"The output of the guardrail function.\"\"\"\n@dataclass\nclass OutputGuardrailResult:", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "OutputGuardrailResult", - "kind": 6, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "class OutputGuardrailResult:\n \"\"\"The result of a guardrail run.\"\"\"\n guardrail: OutputGuardrail[Any]\n \"\"\"\n The guardrail that was run.\n \"\"\"\n agent_output: Any\n \"\"\"\n The output of the agent that was checked by the guardrail.\n \"\"\"", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "InputGuardrail", - "kind": 6, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "class InputGuardrail(Generic[TContext]):\n \"\"\"Input guardrails are checks that run in parallel to the agent's execution.\n They can be used to do things like:\n - Check if input messages are off-topic\n - Take over control of the agent's execution if an unexpected input is detected\n You can use the `@input_guardrail()` decorator to turn a function into an `InputGuardrail`, or\n create an `InputGuardrail` manually.\n Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, the agent\n execution will immediately stop and a `InputGuardrailTripwireTriggered` exception will be raised\n \"\"\"", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "OutputGuardrail", - "kind": 6, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "class OutputGuardrail(Generic[TContext]):\n \"\"\"Output guardrails are checks that run on the final output of an agent.\n They can be used to do check if the output passes certain validation criteria\n You can use the `@output_guardrail()` decorator to turn a function into an `OutputGuardrail`,\n or create an `OutputGuardrail` manually.\n Guardrails return a `GuardrailResult`. 
If `result.tripwire_triggered` is `True`, a\n `OutputGuardrailTripwireTriggered` exception will be raised.\n \"\"\"\n guardrail_function: Callable[\n [RunContextWrapper[TContext], Agent[Any], Any],", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "input_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(\n func: _InputGuardrailFuncAsync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(\n *,", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "input_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def input_guardrail(\n func: _InputGuardrailFuncAsync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]],\n InputGuardrail[TContext_co],", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "input_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def input_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]],\n InputGuardrail[TContext_co],\n]: ...\ndef input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co]\n | _InputGuardrailFuncAsync[TContext_co]", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "input_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co]\n | _InputGuardrailFuncAsync[TContext_co]\n | None = None,\n *,\n name: str | None = None,\n) -> (\n InputGuardrail[TContext_co]\n | Callable[\n [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]],", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "output_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(\n func: _OutputGuardrailFuncAsync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(\n *,", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "output_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def output_guardrail(\n func: _OutputGuardrailFuncAsync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]],\n OutputGuardrail[TContext_co],", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "output_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def output_guardrail(\n *,\n name: str | None = None,\n) -> Callable[\n [_OutputGuardrailFuncSync[TContext_co] | 
_OutputGuardrailFuncAsync[TContext_co]],\n OutputGuardrail[TContext_co],\n]: ...\ndef output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co]\n | _OutputGuardrailFuncAsync[TContext_co]", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "output_guardrail", - "kind": 2, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "def output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co]\n | _OutputGuardrailFuncAsync[TContext_co]\n | None = None,\n *,\n name: str | None = None,\n) -> (\n OutputGuardrail[TContext_co]\n | Callable[\n [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]],", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "TContext_co", - "kind": 5, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "TContext_co = TypeVar(\"TContext_co\", bound=Any, covariant=True)\n# For InputGuardrail\n_InputGuardrailFuncSync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n GuardrailFunctionOutput,\n]\n_InputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n Awaitable[GuardrailFunctionOutput],\n]", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "_InputGuardrailFuncSync", - "kind": 5, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "_InputGuardrailFuncSync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n GuardrailFunctionOutput,\n]\n_InputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef input_guardrail(", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "_InputGuardrailFuncAsync", - "kind": 5, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "_InputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Union[str, list[TResponseInputItem]]],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef input_guardrail(\n func: _InputGuardrailFuncSync[TContext_co],\n) -> InputGuardrail[TContext_co]: ...\n@overload\ndef input_guardrail(", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "_OutputGuardrailFuncSync", - "kind": 5, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "_OutputGuardrailFuncSync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Any],\n GuardrailFunctionOutput,\n]\n_OutputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Any],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef output_guardrail(", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": "_OutputGuardrailFuncAsync", - "kind": 5, - "importPath": "src.agents.guardrail", - "description": "src.agents.guardrail", - "peekOfCode": "_OutputGuardrailFuncAsync = Callable[\n [RunContextWrapper[TContext_co], \"Agent[Any]\", Any],\n Awaitable[GuardrailFunctionOutput],\n]\n@overload\ndef output_guardrail(\n func: _OutputGuardrailFuncSync[TContext_co],\n) -> OutputGuardrail[TContext_co]: ...\n@overload\ndef output_guardrail(", - "detail": "src.agents.guardrail", - "documentation": {} - }, - { - "label": 
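The guardrail entries above describe the `@input_guardrail` decorator and `GuardrailFunctionOutput`. A minimal sketch of how they compose follows; the agent name, instructions, and the "empty input" check are illustrative, not taken from this patch.

from __future__ import annotations

from typing import Any

from agents import Agent, GuardrailFunctionOutput, RunContextWrapper, input_guardrail


@input_guardrail
async def reject_empty_input(
    ctx: RunContextWrapper, agent: Agent, user_input: str | list[Any]
) -> GuardrailFunctionOutput:
    # Tripping the wire makes the run raise InputGuardrailTripwireTriggered
    # before the model is called.
    is_empty = isinstance(user_input, str) and not user_input.strip()
    return GuardrailFunctionOutput(output_info=None, tripwire_triggered=is_empty)


guarded_agent = Agent(
    name="Support agent",
    instructions="Answer support questions.",
    input_guardrails=[reject_empty_input],
)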
"HandoffInputData", - "kind": 6, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "class HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]\n \"\"\"\n The items generated before the agent turn where the handoff was invoked.\n \"\"\"\n new_items: tuple[RunItem, ...]", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "Handoff", - "kind": 6, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "class Handoff(Generic[TContext]):\n \"\"\"A handoff is when an agent delegates a task to another agent.\n For example, in a customer support scenario you might have a \"triage agent\" that determines\n which agent should handle the user's request, and sub-agents that specialize in different\n areas like billing, account management, etc.\n \"\"\"\n tool_name: str\n \"\"\"The name of the tool that represents the handoff.\"\"\"\n tool_description: str\n \"\"\"The description of the tool that represents the handoff.\"\"\"", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "handoff", - "kind": 2, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "def handoff(\n agent: Agent[TContext],\n *,\n tool_name_override: str | None = None,\n tool_description_override: str | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]: ...\n@overload\ndef handoff(", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "handoff", - "kind": 2, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "def handoff(\n agent: Agent[TContext],\n *,\n on_handoff: OnHandoffWithInput[THandoffInput],\n input_type: type[THandoffInput],\n tool_description_override: str | None = None,\n tool_name_override: str | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]: ...", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "handoff", - "kind": 2, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "def handoff(\n agent: Agent[TContext],\n *,\n on_handoff: OnHandoffWithoutInput,\n tool_description_override: str | None = None,\n tool_name_override: str | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]: ...\ndef handoff(", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "handoff", - "kind": 2, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "def handoff(\n agent: Agent[TContext],\n tool_name_override: str | None = None,\n tool_description_override: str | None = None,\n on_handoff: OnHandoffWithInput[THandoffInput] | OnHandoffWithoutInput | None = None,\n input_type: type[THandoffInput] | None = None,\n input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None,\n is_enabled: bool | Callable[[RunContextWrapper[Any], Agent[Any]], 
MaybeAwaitable[bool]] = True,\n) -> Handoff[TContext]:\n \"\"\"Create a handoff from an agent.", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "THandoffInput", - "kind": 5, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "THandoffInput = TypeVar(\"THandoffInput\", default=Any)\nOnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any]\nOnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]\n@dataclass(frozen=True)\nclass HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "OnHandoffWithInput", - "kind": 5, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "OnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any]\nOnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]\n@dataclass(frozen=True)\nclass HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]\n \"\"\"", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "OnHandoffWithoutInput", - "kind": 5, - "importPath": "src.agents.handoffs", - "description": "src.agents.handoffs", - "peekOfCode": "OnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any]\n@dataclass(frozen=True)\nclass HandoffInputData:\n input_history: str | tuple[TResponseInputItem, ...]\n \"\"\"\n The input history before `Runner.run()` was called.\n \"\"\"\n pre_handoff_items: tuple[RunItem, ...]\n \"\"\"\n The items generated before the agent turn where the handoff was invoked.", - "detail": "src.agents.handoffs", - "documentation": {} - }, - { - "label": "RunItemBase", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T\n \"\"\"The raw Responses item from the run. This will always be a either an output item (i.e.\n `openai.types.responses.ResponseOutputItem` or an input item\n (i.e. 
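The `Handoff` and `handoff()` entries above describe agent-to-agent delegation. A small sketch of the customer-support triage pattern mentioned in the `Handoff` docstring; the agent names, instructions, and the overridden tool name are illustrative.

from agents import Agent, handoff

billing_agent = Agent(name="Billing agent", instructions="Resolve billing questions.")
refund_agent = Agent(name="Refund agent", instructions="Process refund requests.")

# Each handoff is exposed to the model as a tool; tool_name_override controls
# the tool name the LLM sees.
triage_agent = Agent(
    name="Triage agent",
    instructions="Decide whether billing or refunds should handle the request.",
    handoffs=[
        handoff(billing_agent),
        handoff(refund_agent, tool_name_override="transfer_to_refunds"),
    ],
)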
`openai.types.responses.ResponseInputItemParam`).\n \"\"\"\n def to_input_item(self) -> TResponseInputItem:\n \"\"\"Converts this item into an input item suitable for passing to the model.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "MessageOutputItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class MessageOutputItem(RunItemBase[ResponseOutputMessage]):\n \"\"\"Represents a message from the LLM.\"\"\"\n raw_item: ResponseOutputMessage\n \"\"\"The raw response output message.\"\"\"\n type: Literal[\"message_output_item\"] = \"message_output_item\"\n@dataclass\nclass HandoffCallItem(RunItemBase[ResponseFunctionToolCall]):\n \"\"\"Represents a tool call for a handoff from one agent to another.\"\"\"\n raw_item: ResponseFunctionToolCall\n \"\"\"The raw response function tool call that represents the handoff.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "HandoffCallItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class HandoffCallItem(RunItemBase[ResponseFunctionToolCall]):\n \"\"\"Represents a tool call for a handoff from one agent to another.\"\"\"\n raw_item: ResponseFunctionToolCall\n \"\"\"The raw response function tool call that represents the handoff.\"\"\"\n type: Literal[\"handoff_call_item\"] = \"handoff_call_item\"\n@dataclass\nclass HandoffOutputItem(RunItemBase[TResponseInputItem]):\n \"\"\"Represents the output of a handoff.\"\"\"\n raw_item: TResponseInputItem\n \"\"\"The raw input item that represents the handoff taking place.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "HandoffOutputItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class HandoffOutputItem(RunItemBase[TResponseInputItem]):\n \"\"\"Represents the output of a handoff.\"\"\"\n raw_item: TResponseInputItem\n \"\"\"The raw input item that represents the handoff taking place.\"\"\"\n source_agent: Agent[Any]\n \"\"\"The agent that made the handoff.\"\"\"\n target_agent: Agent[Any]\n \"\"\"The agent that is being handed off to.\"\"\"\n type: Literal[\"handoff_output_item\"] = \"handoff_output_item\"\nToolCallItemTypes: TypeAlias = Union[", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "ToolCallItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class ToolCallItem(RunItemBase[ToolCallItemTypes]):\n \"\"\"Represents a tool call e.g. a function call or computer action call.\"\"\"\n raw_item: ToolCallItemTypes\n \"\"\"The raw tool call item.\"\"\"\n type: Literal[\"tool_call_item\"] = \"tool_call_item\"\n@dataclass\nclass ToolCallOutputItem(\n RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]\n):\n \"\"\"Represents the output of a tool call.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "ToolCallOutputItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class ToolCallOutputItem(\n RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]\n):\n \"\"\"Represents the output of a tool call.\"\"\"\n raw_item: FunctionCallOutput | ComputerCallOutput | LocalShellCallOutput\n \"\"\"The raw item from the model.\"\"\"\n output: Any\n \"\"\"The output of the tool call. 
This is whatever the tool call returned; the `raw_item`\n contains a string representation of the output.\n \"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "ReasoningItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class ReasoningItem(RunItemBase[ResponseReasoningItem]):\n \"\"\"Represents a reasoning item.\"\"\"\n raw_item: ResponseReasoningItem\n \"\"\"The raw reasoning item.\"\"\"\n type: Literal[\"reasoning_item\"] = \"reasoning_item\"\n@dataclass\nclass MCPListToolsItem(RunItemBase[McpListTools]):\n \"\"\"Represents a call to an MCP server to list tools.\"\"\"\n raw_item: McpListTools\n \"\"\"The raw MCP list tools call.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "MCPListToolsItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class MCPListToolsItem(RunItemBase[McpListTools]):\n \"\"\"Represents a call to an MCP server to list tools.\"\"\"\n raw_item: McpListTools\n \"\"\"The raw MCP list tools call.\"\"\"\n type: Literal[\"mcp_list_tools_item\"] = \"mcp_list_tools_item\"\n@dataclass\nclass MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):\n \"\"\"Represents a request for MCP approval.\"\"\"\n raw_item: McpApprovalRequest\n \"\"\"The raw MCP approval request.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "MCPApprovalRequestItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class MCPApprovalRequestItem(RunItemBase[McpApprovalRequest]):\n \"\"\"Represents a request for MCP approval.\"\"\"\n raw_item: McpApprovalRequest\n \"\"\"The raw MCP approval request.\"\"\"\n type: Literal[\"mcp_approval_request_item\"] = \"mcp_approval_request_item\"\n@dataclass\nclass MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):\n \"\"\"Represents a response to an MCP approval request.\"\"\"\n raw_item: McpApprovalResponse\n \"\"\"The raw MCP approval response.\"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "MCPApprovalResponseItem", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):\n \"\"\"Represents a response to an MCP approval request.\"\"\"\n raw_item: McpApprovalResponse\n \"\"\"The raw MCP approval response.\"\"\"\n type: Literal[\"mcp_approval_response_item\"] = \"mcp_approval_response_item\"\nRunItem: TypeAlias = Union[\n MessageOutputItem,\n HandoffCallItem,\n HandoffOutputItem,\n ToolCallItem,", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "ModelResponse", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class ModelResponse:\n output: list[TResponseOutputItem]\n \"\"\"A list of outputs (messages, tool calls, etc) generated by the model\"\"\"\n usage: Usage\n \"\"\"The usage information for the response.\"\"\"\n response_id: str | None\n \"\"\"An ID for the response which can be used to refer to the response in subsequent calls to the\n model. 
Not supported by all model providers.\n If using OpenAI models via the Responses API, this is the `response_id` parameter, and it can\n be passed to `Runner.run`.", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "ItemHelpers", - "kind": 6, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "class ItemHelpers:\n @classmethod\n def extract_last_content(cls, message: TResponseOutputItem) -> str:\n \"\"\"Extracts the last text content or refusal from a message.\"\"\"\n if not isinstance(message, ResponseOutputMessage):\n return \"\"\n last_content = message.content[-1]\n if isinstance(last_content, ResponseOutputText):\n return last_content.text\n elif isinstance(last_content, ResponseOutputRefusal):", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "TResponse", - "kind": 5, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "TResponse = Response\n\"\"\"A type alias for the Response type from the OpenAI SDK.\"\"\"\nTResponseInputItem = ResponseInputItemParam\n\"\"\"A type alias for the ResponseInputItemParam type from the OpenAI SDK.\"\"\"\nTResponseOutputItem = ResponseOutputItem\n\"\"\"A type alias for the ResponseOutputItem type from the OpenAI SDK.\"\"\"\nTResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "TResponseInputItem", - "kind": 5, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "TResponseInputItem = ResponseInputItemParam\n\"\"\"A type alias for the ResponseInputItemParam type from the OpenAI SDK.\"\"\"\nTResponseOutputItem = ResponseOutputItem\n\"\"\"A type alias for the ResponseOutputItem type from the OpenAI SDK.\"\"\"\nTResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "TResponseOutputItem", - "kind": 5, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "TResponseOutputItem = ResponseOutputItem\n\"\"\"A type alias for the ResponseOutputItem type from the OpenAI SDK.\"\"\"\nTResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "TResponseStreamEvent", - "kind": 5, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "TResponseStreamEvent = ResponseStreamEvent\n\"\"\"A type alias for the ResponseStreamEvent type from the OpenAI SDK.\"\"\"\nT = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T\n \"\"\"The raw Responses item from the run. 
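The `src.agents.items` entries above catalogue the `RunItem` variants and `ItemHelpers`. A sketch of walking `result.new_items` after a run, using only names shown in the index; the prompt and the printing logic are illustrative.

from agents import Agent, ItemHelpers, Runner


async def show_items() -> None:
    agent = Agent(name="Assistant", instructions="Answer briefly.")
    result = await Runner.run(agent, "What is a run item?")
    for item in result.new_items:
        if item.type == "message_output_item":
            # extract_last_content reads the last text or refusal from the raw message.
            print("message:", ItemHelpers.extract_last_content(item.raw_item))
        elif item.type == "tool_call_item":
            print("tool call:", item.raw_item)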
This will always be a either an output item (i.e.\n `openai.types.responses.ResponseOutputItem` or an input item", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "T", - "kind": 5, - "importPath": "src.agents.items", - "description": "src.agents.items", - "peekOfCode": "T = TypeVar(\"T\", bound=Union[TResponseOutputItem, TResponseInputItem])\n@dataclass\nclass RunItemBase(Generic[T], abc.ABC):\n agent: Agent[Any]\n \"\"\"The agent whose run caused this item to be generated.\"\"\"\n raw_item: T\n \"\"\"The raw Responses item from the run. This will always be a either an output item (i.e.\n `openai.types.responses.ResponseOutputItem` or an input item\n (i.e. `openai.types.responses.ResponseInputItemParam`).\n \"\"\"", - "detail": "src.agents.items", - "documentation": {} - }, - { - "label": "RunHooksBase", - "kind": 6, - "importPath": "src.agents.lifecycle", - "description": "src.agents.lifecycle", - "peekOfCode": "class RunHooksBase(Generic[TContext, TAgent]):\n \"\"\"A class that receives callbacks on various lifecycle events in an agent run. Subclass and\n override the methods you need.\n \"\"\"\n async def on_agent_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:\n \"\"\"Called before the agent is invoked. Called each time the current agent changes.\"\"\"\n pass\n async def on_agent_end(\n self,\n context: RunContextWrapper[TContext],", - "detail": "src.agents.lifecycle", - "documentation": {} - }, - { - "label": "AgentHooksBase", - "kind": 6, - "importPath": "src.agents.lifecycle", - "description": "src.agents.lifecycle", - "peekOfCode": "class AgentHooksBase(Generic[TContext, TAgent]):\n \"\"\"A class that receives callbacks on various lifecycle events for a specific agent. You can\n set this on `agent.hooks` to receive events for that specific agent.\n Subclass and override the methods you need.\n \"\"\"\n async def on_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:\n \"\"\"Called before the agent is invoked. Called each time the running agent is changed to this\n agent.\"\"\"\n pass\n async def on_end(", - "detail": "src.agents.lifecycle", - "documentation": {} - }, - { - "label": "TAgent", - "kind": 5, - "importPath": "src.agents.lifecycle", - "description": "src.agents.lifecycle", - "peekOfCode": "TAgent = TypeVar(\"TAgent\", bound=AgentBase, default=AgentBase)\nclass RunHooksBase(Generic[TContext, TAgent]):\n \"\"\"A class that receives callbacks on various lifecycle events in an agent run. Subclass and\n override the methods you need.\n \"\"\"\n async def on_agent_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:\n \"\"\"Called before the agent is invoked. 
Called each time the current agent changes.\"\"\"\n pass\n async def on_agent_end(\n self,", - "detail": "src.agents.lifecycle", - "documentation": {} - }, - { - "label": "RunHooks", - "kind": 5, - "importPath": "src.agents.lifecycle", - "description": "src.agents.lifecycle", - "peekOfCode": "RunHooks = RunHooksBase[TContext, Agent]\n\"\"\"Run hooks when using `Agent`.\"\"\"\nAgentHooks = AgentHooksBase[TContext, Agent]\n\"\"\"Agent hooks for `Agent`s.\"\"\"", - "detail": "src.agents.lifecycle", - "documentation": {} - }, - { - "label": "AgentHooks", - "kind": 5, - "importPath": "src.agents.lifecycle", - "description": "src.agents.lifecycle", - "peekOfCode": "AgentHooks = AgentHooksBase[TContext, Agent]\n\"\"\"Agent hooks for `Agent`s.\"\"\"", - "detail": "src.agents.lifecycle", - "documentation": {} - }, - { - "label": "logger", - "kind": 5, - "importPath": "src.agents.logger", - "description": "src.agents.logger", - "peekOfCode": "logger = logging.getLogger(\"openai.agents\")", - "detail": "src.agents.logger", - "documentation": {} - }, - { - "label": "_OmitTypeAnnotation", - "kind": 6, - "importPath": "src.agents.model_settings", - "description": "src.agents.model_settings", - "peekOfCode": "class _OmitTypeAnnotation:\n @classmethod\n def __get_pydantic_core_schema__(\n cls,\n _source_type: Any,\n _handler: GetCoreSchemaHandler,\n ) -> core_schema.CoreSchema:\n def validate_from_none(value: None) -> _Omit:\n return _Omit()\n from_none_schema = core_schema.chain_schema(", - "detail": "src.agents.model_settings", - "documentation": {} - }, - { - "label": "MCPToolChoice", - "kind": 6, - "importPath": "src.agents.model_settings", - "description": "src.agents.model_settings", - "peekOfCode": "class MCPToolChoice:\n server_label: str\n name: str\nOmit = Annotated[_Omit, _OmitTypeAnnotation]\nHeaders: TypeAlias = Mapping[str, Union[str, Omit]]\nToolChoice: TypeAlias = Union[Literal[\"auto\", \"required\", \"none\"], str, MCPToolChoice, None]\n@dataclass\nclass ModelSettings:\n \"\"\"Settings to use when calling an LLM.\n This class holds optional model configuration parameters (e.g. temperature,", - "detail": "src.agents.model_settings", - "documentation": {} - }, - { - "label": "ModelSettings", - "kind": 6, - "importPath": "src.agents.model_settings", - "description": "src.agents.model_settings", - "peekOfCode": "class ModelSettings:\n \"\"\"Settings to use when calling an LLM.\n This class holds optional model configuration parameters (e.g. temperature,\n top_p, penalties, truncation, etc.).\n Not all models/providers support all of these parameters, so please check the API documentation\n for the specific model and provider you are using.\n \"\"\"\n temperature: float | None = None\n \"\"\"The temperature to use when calling the model.\"\"\"\n top_p: float | None = None", - "detail": "src.agents.model_settings", - "documentation": {} - }, - { - "label": "Omit", - "kind": 5, - "importPath": "src.agents.model_settings", - "description": "src.agents.model_settings", - "peekOfCode": "Omit = Annotated[_Omit, _OmitTypeAnnotation]\nHeaders: TypeAlias = Mapping[str, Union[str, Omit]]\nToolChoice: TypeAlias = Union[Literal[\"auto\", \"required\", \"none\"], str, MCPToolChoice, None]\n@dataclass\nclass ModelSettings:\n \"\"\"Settings to use when calling an LLM.\n This class holds optional model configuration parameters (e.g. 
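The lifecycle entries above (`RunHooksBase`, `AgentHooksBase`) are what this patch changes: `on_tool_start` now receives the `Action` (the tool call paired with its tool) rather than the bare `Tool`. A minimal run-level hook under that signature; the logging itself is illustrative.

from agents import Action, Agent, RunContextWrapper, RunHooks


class LoggingRunHooks(RunHooks):
    async def on_tool_start(
        self, context: RunContextWrapper, agent: Agent, action: Action
    ) -> None:
        # Action is Union[ToolRunFunction, ToolRunComputerAction]; only the function
        # variant carries `function_tool` and the call's JSON arguments.
        if hasattr(action, "function_tool"):
            print(f"{agent.name} -> {action.function_tool.name}({action.tool_call.arguments})")
        else:
            print(f"{agent.name} -> {action.computer_tool.name}")

# Pass an instance to the run: Runner.run(agent, "...", hooks=LoggingRunHooks())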
temperature,\n top_p, penalties, truncation, etc.).\n Not all models/providers support all of these parameters, so please check the API documentation\n for the specific model and provider you are using.", - "detail": "src.agents.model_settings", - "documentation": {} - }, - { - "label": "Prompt", - "kind": 6, - "importPath": "src.agents.prompts", - "description": "src.agents.prompts", - "peekOfCode": "class Prompt(TypedDict):\n \"\"\"Prompt configuration to use for interacting with an OpenAI model.\"\"\"\n id: str\n \"\"\"The unique ID of the prompt.\"\"\"\n version: NotRequired[str]\n \"\"\"Optional version of the prompt.\"\"\"\n variables: NotRequired[dict[str, ResponsesPromptVariables]]\n \"\"\"Optional variables to substitute into the prompt.\"\"\"\n@dataclass\nclass GenerateDynamicPromptData:", - "detail": "src.agents.prompts", - "documentation": {} - }, - { - "label": "GenerateDynamicPromptData", - "kind": 6, - "importPath": "src.agents.prompts", - "description": "src.agents.prompts", - "peekOfCode": "class GenerateDynamicPromptData:\n \"\"\"Inputs to a function that allows you to dynamically generate a prompt.\"\"\"\n context: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n agent: Agent[Any]\n \"\"\"The agent for which the prompt is being generated.\"\"\"\nDynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]\n\"\"\"A function that dynamically generates a prompt.\"\"\"\nclass PromptUtil:\n @staticmethod", - "detail": "src.agents.prompts", - "documentation": {} - }, - { - "label": "PromptUtil", - "kind": 6, - "importPath": "src.agents.prompts", - "description": "src.agents.prompts", - "peekOfCode": "class PromptUtil:\n @staticmethod\n async def to_model_input(\n prompt: Prompt | DynamicPromptFunction | None,\n context: RunContextWrapper[Any],\n agent: Agent[Any],\n ) -> ResponsePromptParam | None:\n if prompt is None:\n return None\n resolved_prompt: Prompt", - "detail": "src.agents.prompts", - "documentation": {} - }, - { - "label": "DynamicPromptFunction", - "kind": 5, - "importPath": "src.agents.prompts", - "description": "src.agents.prompts", - "peekOfCode": "DynamicPromptFunction = Callable[[GenerateDynamicPromptData], MaybeAwaitable[Prompt]]\n\"\"\"A function that dynamically generates a prompt.\"\"\"\nclass PromptUtil:\n @staticmethod\n async def to_model_input(\n prompt: Prompt | DynamicPromptFunction | None,\n context: RunContextWrapper[Any],\n agent: Agent[Any],\n ) -> ResponsePromptParam | None:\n if prompt is None:", - "detail": "src.agents.prompts", - "documentation": {} - }, - { - "label": "RunResultBase", - "kind": 6, - "importPath": "src.agents.result", - "description": "src.agents.result", - "peekOfCode": "class RunResultBase(abc.ABC):\n input: str | list[TResponseInputItem]\n \"\"\"The original input items i.e. the items before run() was called. This may be a mutated\n version of the input, if there are handoff input filters that mutate the input.\n \"\"\"\n new_items: list[RunItem]\n \"\"\"The new items generated during the agent run. 
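The `ModelSettings` entry above lists per-call model parameters. A sketch of overriding them on one agent; the values are illustrative, and as the docstring notes, not every provider supports every field.

from agents import Agent, ModelSettings

concise_agent = Agent(
    name="Summarizer",
    instructions="Summarize the input in two sentences.",
    model_settings=ModelSettings(temperature=0.2, top_p=0.9),
)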
These include things like new messages, tool\n calls and their outputs, etc.\n \"\"\"\n raw_responses: list[ModelResponse]", - "detail": "src.agents.result", - "documentation": {} - }, - { - "label": "RunResult", - "kind": 6, - "importPath": "src.agents.result", - "description": "src.agents.result", - "peekOfCode": "class RunResult(RunResultBase):\n _last_agent: Agent[Any]\n @property\n def last_agent(self) -> Agent[Any]:\n \"\"\"The last agent that was run.\"\"\"\n return self._last_agent\n def __str__(self) -> str:\n return pretty_print_result(self)\n@dataclass\nclass RunResultStreaming(RunResultBase):", - "detail": "src.agents.result", - "documentation": {} - }, - { - "label": "RunResultStreaming", - "kind": 6, - "importPath": "src.agents.result", - "description": "src.agents.result", - "peekOfCode": "class RunResultStreaming(RunResultBase):\n \"\"\"The result of an agent run in streaming mode. You can use the `stream_events` method to\n receive semantic events as they are generated.\n The streaming method will raise:\n - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit.\n - A GuardrailTripwireTriggered exception if a guardrail is tripped.\n \"\"\"\n current_agent: Agent[Any]\n \"\"\"The current agent that is running.\"\"\"\n current_turn: int", - "detail": "src.agents.result", - "documentation": {} - }, - { - "label": "T", - "kind": 5, - "importPath": "src.agents.result", - "description": "src.agents.result", - "peekOfCode": "T = TypeVar(\"T\")\n@dataclass\nclass RunResultBase(abc.ABC):\n input: str | list[TResponseInputItem]\n \"\"\"The original input items i.e. the items before run() was called. This may be a mutated\n version of the input, if there are handoff input filters that mutate the input.\n \"\"\"\n new_items: list[RunItem]\n \"\"\"The new items generated during the agent run. These include things like new messages, tool\n calls and their outputs, etc.", - "detail": "src.agents.result", - "documentation": {} - }, - { - "label": "RunConfig", - "kind": 6, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "class RunConfig:\n \"\"\"Configures settings for the entire agent run.\"\"\"\n model: str | Model | None = None\n \"\"\"The model to use for the entire agent run. If set, will override the model set on every\n agent. The model_provider passed in below must be able to resolve this model name.\n \"\"\"\n model_provider: ModelProvider = field(default_factory=MultiProvider)\n \"\"\"The model provider to use when looking up string model names. Defaults to OpenAI.\"\"\"\n model_settings: ModelSettings | None = None\n \"\"\"Configure global model settings. 
Any non-null values will override the agent-specific model", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "RunOptions", - "kind": 6, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "class RunOptions(TypedDict, Generic[TContext]):\n \"\"\"Arguments for ``AgentRunner`` methods.\"\"\"\n context: NotRequired[TContext | None]\n \"\"\"The context for the run.\"\"\"\n max_turns: NotRequired[int]\n \"\"\"The maximum number of turns to run for.\"\"\"\n hooks: NotRequired[RunHooks[TContext] | None]\n \"\"\"Lifecycle hooks for the run.\"\"\"\n run_config: NotRequired[RunConfig | None]\n \"\"\"Run configuration.\"\"\"", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "Runner", - "kind": 6, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "class Runner:\n @classmethod\n async def run(\n cls,\n starting_agent: Agent[TContext],\n input: str | list[TResponseInputItem],\n *,\n context: TContext | None = None,\n max_turns: int = DEFAULT_MAX_TURNS,\n hooks: RunHooks[TContext] | None = None,", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "AgentRunner", - "kind": 6, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "class AgentRunner:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly or subclassed.\n \"\"\"\n async def run(\n self,\n starting_agent: Agent[TContext],\n input: str | list[TResponseInputItem],\n **kwargs: Unpack[RunOptions[TContext]],", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "set_default_agent_runner", - "kind": 2, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "def set_default_agent_runner(runner: AgentRunner | None) -> None:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly.\n \"\"\"\n global DEFAULT_AGENT_RUNNER\n DEFAULT_AGENT_RUNNER = runner or AgentRunner()\ndef get_default_agent_runner() -> AgentRunner:\n \"\"\"\n WARNING: this class is experimental and not part of the public API", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "get_default_agent_runner", - "kind": 2, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "def get_default_agent_runner() -> AgentRunner:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly.\n \"\"\"\n global DEFAULT_AGENT_RUNNER\n return DEFAULT_AGENT_RUNNER\n@dataclass\nclass RunConfig:\n \"\"\"Configures settings for the entire agent run.\"\"\"", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "DEFAULT_MAX_TURNS", - "kind": 5, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "DEFAULT_MAX_TURNS = 10\nDEFAULT_AGENT_RUNNER: AgentRunner = None # type: ignore\n# the value is set at the end of the module\ndef set_default_agent_runner(runner: AgentRunner | None) -> None:\n \"\"\"\n WARNING: this class is experimental and not part of the public API\n It should not be used directly.\n \"\"\"\n global DEFAULT_AGENT_RUNNER\n DEFAULT_AGENT_RUNNER = runner or AgentRunner()", - "detail": "src.agents.run", - "documentation": {} - }, - { - "label": "DEFAULT_AGENT_RUNNER", - "kind": 5, - "importPath": "src.agents.run", - "description": "src.agents.run", - "peekOfCode": "DEFAULT_AGENT_RUNNER = AgentRunner()", - "detail": 
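`RunConfig` and `Runner` above configure and drive an entire run. A minimal end-to-end sketch; the model name is a placeholder, and per the `RunConfig` docstring it overrides whatever model each agent sets.

import asyncio

from agents import Agent, RunConfig, Runner


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Be brief.")
    result = await Runner.run(
        agent,
        "Explain what a handoff is.",
        max_turns=5,
        run_config=RunConfig(model="gpt-4o-mini"),  # placeholder model name
    )
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())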
"src.agents.run", - "documentation": {} - }, - { - "label": "RunContextWrapper", - "kind": 6, - "importPath": "src.agents.run_context", - "description": "src.agents.run_context", - "peekOfCode": "class RunContextWrapper(Generic[TContext]):\n \"\"\"This wraps the context object that you passed to `Runner.run()`. It also contains\n information about the usage of the agent run so far.\n NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code\n you implement, like tool functions, callbacks, hooks, etc.\n \"\"\"\n context: TContext\n \"\"\"The context object (or None), passed by you to `Runner.run()`\"\"\"\n usage: Usage = field(default_factory=Usage)\n \"\"\"The usage of the agent run so far. For streamed responses, the usage will be stale until the", - "detail": "src.agents.run_context", - "documentation": {} - }, - { - "label": "TContext", - "kind": 5, - "importPath": "src.agents.run_context", - "description": "src.agents.run_context", - "peekOfCode": "TContext = TypeVar(\"TContext\", default=Any)\n@dataclass\nclass RunContextWrapper(Generic[TContext]):\n \"\"\"This wraps the context object that you passed to `Runner.run()`. It also contains\n information about the usage of the agent run so far.\n NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code\n you implement, like tool functions, callbacks, hooks, etc.\n \"\"\"\n context: TContext\n \"\"\"The context object (or None), passed by you to `Runner.run()`\"\"\"", - "detail": "src.agents.run_context", - "documentation": {} - }, - { - "label": "RawResponsesStreamEvent", - "kind": 6, - "importPath": "src.agents.stream_events", - "description": "src.agents.stream_events", - "peekOfCode": "class RawResponsesStreamEvent:\n \"\"\"Streaming event from the LLM. These are 'raw' events, i.e. they are directly passed through\n from the LLM.\n \"\"\"\n data: TResponseStreamEvent\n \"\"\"The raw responses streaming event from the LLM.\"\"\"\n type: Literal[\"raw_response_event\"] = \"raw_response_event\"\n \"\"\"The type of the event.\"\"\"\n@dataclass\nclass RunItemStreamEvent:", - "detail": "src.agents.stream_events", - "documentation": {} - }, - { - "label": "RunItemStreamEvent", - "kind": 6, - "importPath": "src.agents.stream_events", - "description": "src.agents.stream_events", - "peekOfCode": "class RunItemStreamEvent:\n \"\"\"Streaming events that wrap a `RunItem`. 
As the agent processes the LLM response, it will\n generate these events for new messages, tool calls, tool outputs, handoffs, etc.\n \"\"\"\n name: Literal[\n \"message_output_created\",\n \"handoff_requested\",\n # This is misspelled, but we can't change it because that would be a breaking change\n \"handoff_occured\",\n \"tool_called\",", - "detail": "src.agents.stream_events", - "documentation": {} - }, - { - "label": "AgentUpdatedStreamEvent", - "kind": 6, - "importPath": "src.agents.stream_events", - "description": "src.agents.stream_events", - "peekOfCode": "class AgentUpdatedStreamEvent:\n \"\"\"Event that notifies that there is a new agent running.\"\"\"\n new_agent: Agent[Any]\n \"\"\"The new agent.\"\"\"\n type: Literal[\"agent_updated_stream_event\"] = \"agent_updated_stream_event\"\nStreamEvent: TypeAlias = Union[RawResponsesStreamEvent, RunItemStreamEvent, AgentUpdatedStreamEvent]\n\"\"\"A streaming event from an agent.\"\"\"", - "detail": "src.agents.stream_events", - "documentation": {} - }, - { - "label": "ensure_strict_json_schema", - "kind": 2, - "importPath": "src.agents.strict_schema", - "description": "src.agents.strict_schema", - "peekOfCode": "def ensure_strict_json_schema(\n schema: dict[str, Any],\n) -> dict[str, Any]:\n \"\"\"Mutates the given JSON schema to ensure it conforms to the `strict` standard\n that the OpenAI API expects.\n \"\"\"\n if schema == {}:\n return _EMPTY_SCHEMA\n return _ensure_strict_json_schema(schema, path=(), root=schema)\n# Adapted from https://github.com/openai/openai-python/blob/main/src/openai/lib/_pydantic.py", - "detail": "src.agents.strict_schema", - "documentation": {} - }, - { - "label": "resolve_ref", - "kind": 2, - "importPath": "src.agents.strict_schema", - "description": "src.agents.strict_schema", - "peekOfCode": "def resolve_ref(*, root: dict[str, object], ref: str) -> object:\n if not ref.startswith(\"#/\"):\n raise ValueError(f\"Unexpected $ref format {ref!r}; Does not start with #/\")\n path = ref[2:].split(\"/\")\n resolved = root\n for key in path:\n value = resolved[key]\n assert is_dict(value), (\n f\"encountered non-dictionary entry while resolving {ref} - {resolved}\"\n )", - "detail": "src.agents.strict_schema", - "documentation": {} - }, - { - "label": "is_dict", - "kind": 2, - "importPath": "src.agents.strict_schema", - "description": "src.agents.strict_schema", - "peekOfCode": "def is_dict(obj: object) -> TypeGuard[dict[str, object]]:\n # just pretend that we know there are only `str` keys\n # as that check is not worth the performance cost\n return isinstance(obj, dict)\ndef is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)\ndef has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:\n i = 0\n for _ in obj.keys():\n i += 1", - "detail": "src.agents.strict_schema", - "documentation": {} - }, - { - "label": "is_list", - "kind": 2, - "importPath": "src.agents.strict_schema", - "description": "src.agents.strict_schema", - "peekOfCode": "def is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)\ndef has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:\n i = 0\n for _ in obj.keys():\n i += 1\n if i > n:\n return True\n return False", - "detail": "src.agents.strict_schema", - "documentation": {} - }, - { - "label": "has_more_than_n_keys", - "kind": 2, - "importPath": "src.agents.strict_schema", - "description": "src.agents.strict_schema", - "peekOfCode": "def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:\n i = 0\n for _ in 
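The `stream_events` entries above (`RawResponsesStreamEvent`, `RunItemStreamEvent`, `AgentUpdatedStreamEvent`) are what a streamed run yields. A sketch that prints only completed messages; the event name literal comes from the index above, the rest (agent, prompt) is illustrative.

from agents import Agent, ItemHelpers, Runner


async def stream_demo() -> None:
    agent = Agent(name="Joker", instructions="Tell one short joke.")
    result = Runner.run_streamed(agent, input="Tell me a joke")
    async for event in result.stream_events():
        # Raw token deltas arrive as raw_response_event; semantic items are wrapped
        # in RunItemStreamEvent with names such as "message_output_created".
        if event.type == "run_item_stream_event" and event.name == "message_output_created":
            print(ItemHelpers.extract_last_content(event.item.raw_item))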
obj.keys():\n i += 1\n if i > n:\n return True\n return False", - "detail": "src.agents.strict_schema", - "documentation": {} - }, - { - "label": "_EMPTY_SCHEMA", - "kind": 5, - "importPath": "src.agents.strict_schema", - "description": "src.agents.strict_schema", - "peekOfCode": "_EMPTY_SCHEMA = {\n \"additionalProperties\": False,\n \"type\": \"object\",\n \"properties\": {},\n \"required\": [],\n}\ndef ensure_strict_json_schema(\n schema: dict[str, Any],\n) -> dict[str, Any]:\n \"\"\"Mutates the given JSON schema to ensure it conforms to the `strict` standard", - "detail": "src.agents.strict_schema", - "documentation": {} - }, - { - "label": "FunctionToolResult", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class FunctionToolResult:\n tool: FunctionTool\n \"\"\"The tool that was run.\"\"\"\n output: Any\n \"\"\"The output of the tool.\"\"\"\n run_item: RunItem\n \"\"\"The run item that was produced as a result of the tool call.\"\"\"\n@dataclass\nclass FunctionTool:\n \"\"\"A tool that wraps a function. In most cases, you should use the `function_tool` helpers to", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "FunctionTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class FunctionTool:\n \"\"\"A tool that wraps a function. In most cases, you should use the `function_tool` helpers to\n create a FunctionTool, as they let you easily wrap a Python function.\n \"\"\"\n name: str\n \"\"\"The name of the tool, as shown to the LLM. Generally the name of the function.\"\"\"\n description: str\n \"\"\"A description of the tool, as shown to the LLM.\"\"\"\n params_json_schema: dict[str, Any]\n \"\"\"The JSON schema for the tool's parameters.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "FileSearchTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class FileSearchTool:\n \"\"\"A hosted tool that lets the LLM search through a vector store. Currently only supported with\n OpenAI models, using the Responses API.\n \"\"\"\n vector_store_ids: list[str]\n \"\"\"The IDs of the vector stores to search.\"\"\"\n max_num_results: int | None = None\n \"\"\"The maximum number of results to return.\"\"\"\n include_search_results: bool = False\n \"\"\"Whether to include the search results in the output produced by the LLM.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "WebSearchTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class WebSearchTool:\n \"\"\"A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models,\n using the Responses API.\n \"\"\"\n user_location: UserLocation | None = None\n \"\"\"Optional location for the search. 
Lets you customize results to be relevant to a location.\"\"\"\n search_context_size: Literal[\"low\", \"medium\", \"high\"] = \"medium\"\n \"\"\"The amount of context to use for the search.\"\"\"\n @property\n def name(self):", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ComputerTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class ComputerTool:\n \"\"\"A hosted tool that lets the LLM control a computer.\"\"\"\n computer: Computer | AsyncComputer\n \"\"\"The computer implementation, which describes the environment and dimensions of the computer,\n as well as implements the computer actions like click, screenshot, etc.\n \"\"\"\n on_safety_check: Callable[[ComputerToolSafetyCheckData], MaybeAwaitable[bool]] | None = None\n \"\"\"Optional callback to acknowledge computer tool safety checks.\"\"\"\n @property\n def name(self):", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ComputerToolSafetyCheckData", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class ComputerToolSafetyCheckData:\n \"\"\"Information about a computer tool safety check.\"\"\"\n ctx_wrapper: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n agent: Agent[Any]\n \"\"\"The agent performing the computer action.\"\"\"\n tool_call: ResponseComputerToolCall\n \"\"\"The computer tool call.\"\"\"\n safety_check: PendingSafetyCheck\n \"\"\"The pending safety check to acknowledge.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "MCPToolApprovalRequest", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class MCPToolApprovalRequest:\n \"\"\"A request to approve a tool call.\"\"\"\n ctx_wrapper: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n data: McpApprovalRequest\n \"\"\"The data from the MCP tool approval request.\"\"\"\nclass MCPToolApprovalFunctionResult(TypedDict):\n \"\"\"The result of an MCP tool approval function.\"\"\"\n approve: bool\n \"\"\"Whether to approve the tool call.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "MCPToolApprovalFunctionResult", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class MCPToolApprovalFunctionResult(TypedDict):\n \"\"\"The result of an MCP tool approval function.\"\"\"\n approve: bool\n \"\"\"Whether to approve the tool call.\"\"\"\n reason: NotRequired[str]\n \"\"\"An optional reason, if rejected.\"\"\"\nMCPToolApprovalFunction = Callable[\n [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]\n]\n\"\"\"A function that approves or rejects a tool call.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "HostedMCPTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class HostedMCPTool:\n \"\"\"A tool that allows the LLM to use a remote MCP server. 
The LLM will automatically list and\n call tools, without requiring a round trip back to your code.\n If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible\n environment, or you just prefer to run tool calls locally, then you can instead use the servers\n in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent.\"\"\"\n tool_config: Mcp\n \"\"\"The MCP tool config, which includes the server URL and other settings.\"\"\"\n on_approval_request: MCPToolApprovalFunction | None = None\n \"\"\"An optional function that will be called if approval is requested for an MCP tool. If not", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "CodeInterpreterTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class CodeInterpreterTool:\n \"\"\"A tool that allows the LLM to execute code in a sandboxed environment.\"\"\"\n tool_config: CodeInterpreter\n \"\"\"The tool config, which includes the container and other settings.\"\"\"\n @property\n def name(self):\n return \"code_interpreter\"\n@dataclass\nclass ImageGenerationTool:\n \"\"\"A tool that allows the LLM to generate images.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ImageGenerationTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class ImageGenerationTool:\n \"\"\"A tool that allows the LLM to generate images.\"\"\"\n tool_config: ImageGeneration\n \"\"\"The tool config, which image generation settings.\"\"\"\n @property\n def name(self):\n return \"image_generation\"\n@dataclass\nclass LocalShellCommandRequest:\n \"\"\"A request to execute a command on a shell.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "LocalShellCommandRequest", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class LocalShellCommandRequest:\n \"\"\"A request to execute a command on a shell.\"\"\"\n ctx_wrapper: RunContextWrapper[Any]\n \"\"\"The run context.\"\"\"\n data: LocalShellCall\n \"\"\"The data from the local shell tool call.\"\"\"\nLocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]\n\"\"\"A function that executes a command on a shell.\"\"\"\n@dataclass\nclass LocalShellTool:", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "LocalShellTool", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class LocalShellTool:\n \"\"\"A tool that allows the LLM to execute commands on a shell.\"\"\"\n executor: LocalShellExecutor\n \"\"\"A function that executes a command on a shell.\"\"\"\n @property\n def name(self):\n return \"local_shell\"\nTool = Union[\n FunctionTool,\n FileSearchTool,", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolRunFunction", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class ToolRunFunction:\n tool_call: ResponseFunctionToolCall\n function_tool: FunctionTool\n@dataclass\nclass ToolRunComputerAction:\n tool_call: ResponseComputerToolCall\n computer_tool: ComputerTool\nAction = Union[ToolRunFunction, ToolRunComputerAction]\n\"\"\"An action that can be performed by an agent. 
It contains the tool call and the tool\"\"\"\ndef default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolRunComputerAction", - "kind": 6, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "class ToolRunComputerAction:\n tool_call: ResponseComputerToolCall\n computer_tool: ComputerTool\nAction = Union[ToolRunFunction, ToolRunComputerAction]\n\"\"\"An action that can be performed by an agent. It contains the tool call and the tool\"\"\"\ndef default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:\n \"\"\"The default tool error function, which just returns a generic error message.\"\"\"\n return f\"An error occurred while running the tool. Please try again. Error: {str(error)}\"\nToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "default_tool_error_function", - "kind": 2, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:\n \"\"\"The default tool error function, which just returns a generic error message.\"\"\"\n return f\"An error occurred while running the tool. Please try again. Error: {str(error)}\"\nToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload\ndef function_tool(\n func: ToolFunction[...],\n *,\n name_override: str | None = None,\n description_override: str | None = None,", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "function_tool", - "kind": 2, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "def function_tool(\n func: ToolFunction[...],\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = None,\n strict_mode: bool = True,\n is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "function_tool", - "kind": 2, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "def function_tool(\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = None,\n strict_mode: bool = True,\n is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,\n) -> Callable[[ToolFunction[...]], FunctionTool]:", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "function_tool", - "kind": 2, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "def function_tool(\n func: ToolFunction[...] 
| None = None,\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = default_tool_error_function,\n strict_mode: bool = True,\n is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolParams", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "ToolParams = ParamSpec(\"ToolParams\")\nToolFunctionWithoutContext = Callable[ToolParams, Any]\nToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]\nToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolFunctionWithoutContext", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "ToolFunctionWithoutContext = Callable[ToolParams, Any]\nToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]\nToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolFunctionWithContext", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any]\nToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:\n tool: FunctionTool", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolFunctionWithToolContext", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "ToolFunctionWithToolContext = Callable[Concatenate[ToolContext, ToolParams], Any]\nToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:\n tool: FunctionTool\n \"\"\"The tool that was run.\"\"\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolFunction", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "ToolFunction = Union[\n ToolFunctionWithoutContext[ToolParams],\n ToolFunctionWithContext[ToolParams],\n ToolFunctionWithToolContext[ToolParams],\n]\n@dataclass\nclass FunctionToolResult:\n tool: FunctionTool\n \"\"\"The tool that was run.\"\"\"\n output: Any", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "MCPToolApprovalFunction", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "MCPToolApprovalFunction = Callable[\n [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]\n]\n\"\"\"A function that approves 
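`function_tool` above wraps a plain Python function as a `FunctionTool`, and `failure_error_function` (defaulting to `default_tool_error_function`) controls the text the model sees when the tool raises. A sketch; the weather function, tool name, and error wording are illustrative.

from agents import Agent, RunContextWrapper, function_tool


def friendly_error(ctx: RunContextWrapper, error: Exception) -> str:
    # The returned text is sent back to the model in place of the tool result.
    return f"The weather lookup failed ({error}); try another city."


@function_tool(name_override="get_weather", failure_error_function=friendly_error)
def fetch_weather(city: str) -> str:
    """Return the current weather for a city."""
    return f"It is sunny in {city}."


weather_agent = Agent(
    name="Weather agent",
    instructions="Answer weather questions with the get_weather tool.",
    tools=[fetch_weather],
)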
or rejects a tool call.\"\"\"\n@dataclass\nclass HostedMCPTool:\n \"\"\"A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and\n call tools, without requiring a round trip back to your code.\n If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible\n environment, or you just prefer to run tool calls locally, then you can instead use the servers", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "LocalShellExecutor", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]\n\"\"\"A function that executes a command on a shell.\"\"\"\n@dataclass\nclass LocalShellTool:\n \"\"\"A tool that allows the LLM to execute commands on a shell.\"\"\"\n executor: LocalShellExecutor\n \"\"\"A function that executes a command on a shell.\"\"\"\n @property\n def name(self):\n return \"local_shell\"", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "Tool", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "Tool = Union[\n FunctionTool,\n FileSearchTool,\n WebSearchTool,\n ComputerTool,\n HostedMCPTool,\n LocalShellTool,\n ImageGenerationTool,\n CodeInterpreterTool,\n]", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "Action", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "Action = Union[ToolRunFunction, ToolRunComputerAction]\n\"\"\"An action that can be performed by an agent. It contains the tool call and the tool\"\"\"\ndef default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:\n \"\"\"The default tool error function, which just returns a generic error message.\"\"\"\n return f\"An error occurred while running the tool. Please try again. 
Error: {str(error)}\"\nToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload\ndef function_tool(\n func: ToolFunction[...],\n *,", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolErrorFunction", - "kind": 5, - "importPath": "src.agents.tool", - "description": "src.agents.tool", - "peekOfCode": "ToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]]\n@overload\ndef function_tool(\n func: ToolFunction[...],\n *,\n name_override: str | None = None,\n description_override: str | None = None,\n docstring_style: DocstringStyle | None = None,\n use_docstring_info: bool = True,\n failure_error_function: ToolErrorFunction | None = None,", - "detail": "src.agents.tool", - "documentation": {} - }, - { - "label": "ToolContext", - "kind": 6, - "importPath": "src.agents.tool_context", - "description": "src.agents.tool_context", - "peekOfCode": "class ToolContext(RunContextWrapper[TContext]):\n \"\"\"The context of a tool call.\"\"\"\n tool_name: str = field(default_factory=_assert_must_pass_tool_name)\n \"\"\"The name of the tool being invoked.\"\"\"\n tool_call_id: str = field(default_factory=_assert_must_pass_tool_call_id)\n \"\"\"The ID of the tool call.\"\"\"\n @classmethod\n def from_agent_context(\n cls,\n context: RunContextWrapper[TContext],", - "detail": "src.agents.tool_context", - "documentation": {} - }, - { - "label": "Usage", - "kind": 6, - "importPath": "src.agents.usage", - "description": "src.agents.usage", - "peekOfCode": "class Usage:\n requests: int = 0\n \"\"\"Total requests made to the LLM API.\"\"\"\n input_tokens: int = 0\n \"\"\"Total input tokens sent, across all requests.\"\"\"\n input_tokens_details: InputTokensDetails = field(\n default_factory=lambda: InputTokensDetails(cached_tokens=0)\n )\n \"\"\"Details about the input tokens, matching responses API usage details.\"\"\"\n output_tokens: int = 0", - "detail": "src.agents.usage", - "documentation": {} - }, - { - "label": "StreamHandler", - "kind": 6, - "importPath": "tests.fastapi.streaming_app", - "description": "tests.fastapi.streaming_app", - "peekOfCode": "class StreamHandler:\n def __init__(self, result: RunResultStreaming):\n self.result = result\n async def stream_events(self) -> AsyncIterator[str]:\n async for event in self.result.stream_events():\n yield f\"{event.type}\\n\\n\"", - "detail": "tests.fastapi.streaming_app", - "documentation": {} - }, - { - "label": "agent", - "kind": 5, - "importPath": "tests.fastapi.streaming_app", - "description": "tests.fastapi.streaming_app", - "peekOfCode": "agent = Agent(\n name=\"Assistant\",\n instructions=\"You are a helpful assistant.\",\n)\napp = FastAPI()\n@app.post(\"/stream\")\nasync def stream():\n result = Runner.run_streamed(agent, input=\"Tell me a joke\")\n stream_handler = StreamHandler(result)\n return StreamingResponse(stream_handler.stream_events(), media_type=\"application/x-ndjson\")", - "detail": "tests.fastapi.streaming_app", - "documentation": {} - }, - { - "label": "app", - "kind": 5, - "importPath": "tests.fastapi.streaming_app", - "description": "tests.fastapi.streaming_app", - "peekOfCode": "app = FastAPI()\n@app.post(\"/stream\")\nasync def stream():\n result = Runner.run_streamed(agent, input=\"Tell me a joke\")\n stream_handler = StreamHandler(result)\n return StreamingResponse(stream_handler.stream_events(), media_type=\"application/x-ndjson\")\nclass StreamHandler:\n def __init__(self, result: RunResultStreaming):\n self.result = result\n 
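For reference, the function_tool overload signature indexed above can be exercised roughly as follows. This is a minimal sketch assuming the decorator accepts the keyword arguments listed in that signature (name_override, failure_error_function, strict_mode, is_enabled); the get_weather tool and friendly_error helper are made up for illustration.

    from agents import Agent, RunContextWrapper, function_tool


    def friendly_error(ctx: RunContextWrapper, error: Exception) -> str:
        # failure_error_function: turns a tool exception into the string shown to the model.
        return f"get_weather failed: {error}"


    @function_tool(
        name_override="get_weather",            # exposed tool name (defaults to the function name)
        failure_error_function=friendly_error,  # ToolErrorFunction from the alias above
        strict_mode=True,                       # strict JSON-schema validation of arguments
        is_enabled=True,                        # may also be a (context, agent) -> bool callable
    )
    def get_weather(city: str) -> str:
        """Return a short weather summary for `city`."""
        return f"It is always sunny in {city}."


    agent = Agent(name="Assistant", instructions="Use tools when helpful.", tools=[get_weather])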
[remaining completion-cache entries, same format, covering test helpers and test functions: tests.fastapi.streaming_app, tests.mcp (conftest, helpers, test_mcp_util, test_prompt_server, test_runner_calls_mcp, test_server_errors, test_tool_filtering), tests.model_settings.test_serialization, tests.models (conftest, test_map), tests.realtime (test_agent, test_conversion_helpers, test_item_parsing, test_model_events, test_openai_realtime, test_runner, test_session, test_tracing), tests.voice (conftest, fake_models, test_input, test_openai_stt, test_openai_tts, test_workflow), tests.conftest, tests.fake_model, tests.test_agent_config, tests.test_agent_hooks, tests.test_agent_prompt, tests.test_agent_runner, tests.test_agent_runner_streamed, tests.test_computer_action (LoggingComputer, LoggingAsyncComputer, LoggingRunHooks, LoggingAgentHooks — whose on_tool_start hooks take an Action and read action.computer_tool), and tests.test_config; the dump is truncated mid-entry.]
pytest.raises(openai.OpenAIError):\n OpenAIProvider(use_responses=True).get_model(\"gpt-4\")", - "detail": "tests.test_config", - "documentation": {} - }, - { - "label": "test_resp_no_default_key_errors", - "kind": 2, - "importPath": "tests.test_config", - "description": "tests.test_config", - "peekOfCode": "def test_resp_no_default_key_errors(monkeypatch):\n monkeypatch.delenv(\"OPENAI_API_KEY\", raising=False)\n assert os.getenv(\"OPENAI_API_KEY\") is None\n with pytest.raises(openai.OpenAIError):\n OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\ndef test_resp_set_default_openai_key():\n set_default_openai_key(\"test_key\")\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_resp_set_default_openai_client():", - "detail": "tests.test_config", - "documentation": {} - }, - { - "label": "test_resp_set_default_openai_key", - "kind": 2, - "importPath": "tests.test_config", - "description": "tests.test_config", - "peekOfCode": "def test_resp_set_default_openai_key():\n set_default_openai_key(\"test_key\")\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_resp_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")\n set_default_openai_client(client)\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_set_default_openai_api():", - "detail": "tests.test_config", - "documentation": {} - }, - { - "label": "test_resp_set_default_openai_client", - "kind": 2, - "importPath": "tests.test_config", - "description": "tests.test_config", - "peekOfCode": "def test_resp_set_default_openai_client():\n client = openai.AsyncOpenAI(api_key=\"test_key\")\n set_default_openai_client(client)\n resp_model = OpenAIProvider(use_responses=True).get_model(\"gpt-4\")\n assert resp_model._client.api_key == \"test_key\" # type: ignore\ndef test_set_default_openai_api():\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIResponsesModel), (\n \"Default should be responses\"\n )\n set_default_openai_api(\"chat_completions\")", - "detail": "tests.test_config", - "documentation": {} - }, - { - "label": "test_set_default_openai_api", - "kind": 2, - "importPath": "tests.test_config", - "description": "tests.test_config", - "peekOfCode": "def test_set_default_openai_api():\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIResponsesModel), (\n \"Default should be responses\"\n )\n set_default_openai_api(\"chat_completions\")\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIChatCompletionsModel), (\n \"Should be chat completions model\"\n )\n set_default_openai_api(\"responses\")\n assert isinstance(OpenAIProvider().get_model(\"gpt-4\"), OpenAIResponsesModel), (", - "detail": "tests.test_config", - "documentation": {} - }, - { - "label": "Bar", - "kind": 6, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "class Bar:\n def func_bar(self, a: int, b: float) -> str:\n \"\"\"\n This is func_bar.\n Args:\n a: The first argument.\n b: The second argument.\n Returns:\n A result\n \"\"\"", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "func_foo_google", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def func_foo_google(a: int, 
b: float) -> str:\n \"\"\"\n This is func_foo.\n Args:\n a: The first argument.\n b: The second argument.\n Returns:\n A result\n \"\"\"\n return \"ok\"", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "func_foo_numpy", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def func_foo_numpy(a: int, b: float) -> str:\n \"\"\"\n This is func_foo.\n Parameters\n ----------\n a: int\n The first argument.\n b: float\n The second argument.\n Returns", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "func_foo_sphinx", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def func_foo_sphinx(a: int, b: float) -> str:\n \"\"\"\n This is func_foo.\n :param a: The first argument.\n :param b: The second argument.\n :return: A result\n \"\"\"\n return \"ok\"\nclass Bar:\n def func_bar(self, a: int, b: float) -> str:", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "test_functions_are_ok", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def test_functions_are_ok():\n func_foo_google(1, 2.0)\n func_foo_numpy(1, 2.0)\n func_foo_sphinx(1, 2.0)\n Bar().func_bar(1, 2.0)\n Bar.func_baz(1, 2.0)\ndef test_auto_detection() -> None:\n doc = generate_func_documentation(func_foo_google)\n assert doc.name == \"func_foo_google\"\n assert doc.description == \"This is func_foo.\"", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "test_auto_detection", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def test_auto_detection() -> None:\n doc = generate_func_documentation(func_foo_google)\n assert doc.name == \"func_foo_google\"\n assert doc.description == \"This is func_foo.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}\n doc = generate_func_documentation(func_foo_numpy)\n assert doc.name == \"func_foo_numpy\"\n assert doc.description == \"This is func_foo.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}\n doc = generate_func_documentation(func_foo_sphinx)", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "test_instance_method", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def test_instance_method() -> None:\n bar = Bar()\n doc = generate_func_documentation(bar.func_bar)\n assert doc.name == \"func_bar\"\n assert doc.description == \"This is func_bar.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}\ndef test_classmethod() -> None:\n doc = generate_func_documentation(Bar.func_baz)\n assert doc.name == \"func_baz\"\n assert doc.description == \"This is func_baz.\"", - "detail": "tests.test_doc_parsing", - "documentation": {} - }, - { - "label": "test_classmethod", - "kind": 2, - "importPath": "tests.test_doc_parsing", - "description": "tests.test_doc_parsing", - "peekOfCode": "def test_classmethod() -> None:\n doc = generate_func_documentation(Bar.func_baz)\n assert doc.name == \"func_baz\"\n assert doc.description == \"This is func_baz.\"\n assert doc.param_descriptions == {\"a\": \"The first argument.\", \"b\": \"The second argument.\"}", - "detail": "tests.test_doc_parsing", - 
"documentation": {} - }, - { - "label": "fake_agent", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def fake_agent():\n return Agent(\n name=\"fake_agent\",\n )\ndef _get_message_input_item(content: str) -> TResponseInputItem:\n return {\n \"role\": \"assistant\",\n \"content\": content,\n }\ndef _get_function_result_input_item(content: str) -> TResponseInputItem:", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_empty_data", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_empty_data():\n handoff_input_data = HandoffInputData(input_history=(), pre_handoff_items=(), new_items=())\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_str_historyonly():\n handoff_input_data = HandoffInputData(input_history=\"Hello\", pre_handoff_items=(), new_items=())\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_str_history_and_list():\n handoff_input_data = HandoffInputData(", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_str_historyonly", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_str_historyonly():\n handoff_input_data = HandoffInputData(input_history=\"Hello\", pre_handoff_items=(), new_items=())\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_str_history_and_list():\n handoff_input_data = HandoffInputData(\n input_history=\"Hello\",\n pre_handoff_items=(),\n new_items=(_get_message_output_run_item(\"Hello\"),),\n )", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_str_history_and_list", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_str_history_and_list():\n handoff_input_data = HandoffInputData(\n input_history=\"Hello\",\n pre_handoff_items=(),\n new_items=(_get_message_output_run_item(\"Hello\"),),\n )\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_list_history_and_list():\n handoff_input_data = HandoffInputData(", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_list_history_and_list", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_list_history_and_list():\n handoff_input_data = HandoffInputData(\n input_history=(_get_message_input_item(\"Hello\"),),\n pre_handoff_items=(_get_message_output_run_item(\"123\"),),\n new_items=(_get_message_output_run_item(\"World\"),),\n )\n filtered_data = remove_all_tools(handoff_input_data)\n assert filtered_data == handoff_input_data\ndef test_removes_tools_from_history():\n handoff_input_data = HandoffInputData(", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_removes_tools_from_history", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_removes_tools_from_history():\n handoff_input_data = HandoffInputData(\n input_history=(\n _get_message_input_item(\"Hello1\"),\n 
_get_function_result_input_item(\"World\"),\n _get_message_input_item(\"Hello2\"),\n ),\n pre_handoff_items=(\n _get_tool_output_run_item(\"abc\"),\n _get_message_output_run_item(\"123\"),", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_removes_tools_from_new_items", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_removes_tools_from_new_items():\n handoff_input_data = HandoffInputData(\n input_history=(),\n pre_handoff_items=(),\n new_items=(\n _get_message_output_run_item(\"Hello\"),\n _get_tool_output_run_item(\"World\"),\n ),\n )\n filtered_data = remove_all_tools(handoff_input_data)", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_removes_tools_from_new_items_and_history", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_removes_tools_from_new_items_and_history():\n handoff_input_data = HandoffInputData(\n input_history=(\n _get_message_input_item(\"Hello1\"),\n _get_function_result_input_item(\"World\"),\n _get_message_input_item(\"Hello2\"),\n ),\n pre_handoff_items=(\n _get_message_output_run_item(\"123\"),\n _get_tool_output_run_item(\"456\"),", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "test_removes_handoffs_from_history", - "kind": 2, - "importPath": "tests.test_extension_filters", - "description": "tests.test_extension_filters", - "peekOfCode": "def test_removes_handoffs_from_history():\n handoff_input_data = HandoffInputData(\n input_history=(\n _get_message_input_item(\"Hello1\"),\n _get_handoff_input_item(\"World\"),\n ),\n pre_handoff_items=(\n _get_message_output_run_item(\"Hello\"),\n _get_tool_output_run_item(\"World\"),\n _get_handoff_output_run_item(\"World\"),", - "detail": "tests.test_extension_filters", - "documentation": {} - }, - { - "label": "Foo", - "kind": 6, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "class Foo(TypedDict):\n a: int\n b: str\nclass InnerModel(BaseModel):\n a: int\n b: str\nclass OuterModel(BaseModel):\n inner: InnerModel\n foo: Foo\ndef complex_args_function(model: OuterModel) -> str:", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "InnerModel", - "kind": 6, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "class InnerModel(BaseModel):\n a: int\n b: str\nclass OuterModel(BaseModel):\n inner: InnerModel\n foo: Foo\ndef complex_args_function(model: OuterModel) -> str:\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}\"\ndef test_nested_data_function():\n func_schema = function_schema(complex_args_function)", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "OuterModel", - "kind": 6, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "class OuterModel(BaseModel):\n inner: InnerModel\n foo: Foo\ndef complex_args_function(model: OuterModel) -> str:\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}\"\ndef test_nested_data_function():\n func_schema = function_schema(complex_args_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_function_args\"\n 
# Valid input", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "MyClass", - "kind": 6, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "class MyClass:\n def foo(self, a: int, b: int = 5):\n return a + b\n def foo_ctx(self, ctx: RunContextWrapper[str], a: int, b: int = 5):\n return a + b\n @classmethod\n def bar(cls, a: int, b: int = 5):\n return a + b\n @classmethod\n def bar_ctx(cls, ctx: RunContextWrapper[str], a: int, b: int = 5):", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "MyEnum", - "kind": 6, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "class MyEnum(str, Enum):\n FOO = \"foo\"\n BAR = \"bar\"\n BAZ = \"baz\"\ndef enum_and_literal_function(a: MyEnum, b: Literal[\"a\", \"b\", \"c\"]) -> str:\n return f\"{a.value} {b}\"\ndef test_enum_and_literal_function():\n func_schema = function_schema(enum_and_literal_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"enum_and_literal_function_args\"", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "no_args_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def no_args_function():\n \"\"\"This function has no args.\"\"\"\n return \"ok\"\ndef test_no_args_function():\n func_schema = function_schema(no_args_function)\n assert func_schema.params_json_schema.get(\"title\") == \"no_args_function_args\"\n assert func_schema.description == \"This function has no args.\"\n assert not func_schema.takes_context\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_no_args_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_no_args_function():\n func_schema = function_schema(no_args_function)\n assert func_schema.params_json_schema.get(\"title\") == \"no_args_function_args\"\n assert func_schema.description == \"This function has no args.\"\n assert not func_schema.takes_context\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = no_args_function(*args, **kwargs_dict)\n assert result == \"ok\"\ndef no_args_function_with_context(ctx: RunContextWrapper[str]):", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "no_args_function_with_context", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def no_args_function_with_context(ctx: RunContextWrapper[str]):\n return \"ok\"\ndef test_no_args_function_with_context() -> None:\n func_schema = function_schema(no_args_function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = no_args_function_with_context(context, *args, **kwargs_dict)\n assert result == \"ok\"", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_no_args_function_with_context", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": 
"tests.test_function_schema", - "peekOfCode": "def test_no_args_function_with_context() -> None:\n func_schema = function_schema(no_args_function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n parsed = func_schema.params_pydantic_model()\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = no_args_function_with_context(context, *args, **kwargs_dict)\n assert result == \"ok\"\ndef simple_function(a: int, b: int = 5):\n \"\"\"", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "simple_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def simple_function(a: int, b: int = 5):\n \"\"\"\n Args:\n a: The first argument\n b: The second argument\n Returns:\n The sum of a and b\n \"\"\"\n return a + b\ndef test_simple_function():", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_simple_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_simple_function():\n \"\"\"Test a function that has simple typed parameters and defaults.\"\"\"\n func_schema = function_schema(simple_function)\n # Check that the JSON schema is a dictionary with title, type, etc.\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"simple_function_args\"\n assert (\n func_schema.params_json_schema.get(\"properties\", {}).get(\"a\").get(\"description\")\n == \"The first argument\"\n )", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "varargs_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def varargs_function(x: int, *numbers: float, flag: bool = False, **kwargs: Any):\n return x, numbers, flag, kwargs\ndef test_varargs_function():\n \"\"\"Test a function that uses *args and **kwargs.\"\"\"\n func_schema = function_schema(varargs_function, strict_json_schema=False)\n # Check JSON schema structure\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"varargs_function_args\"\n # Valid input including *args in 'numbers' and **kwargs in 'kwargs'\n valid_input = {", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_varargs_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_varargs_function():\n \"\"\"Test a function that uses *args and **kwargs.\"\"\"\n func_schema = function_schema(varargs_function, strict_json_schema=False)\n # Check JSON schema structure\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"varargs_function_args\"\n # Valid input including *args in 'numbers' and **kwargs in 'kwargs'\n valid_input = {\n \"x\": 10,\n \"numbers\": [1.1, 2.2, 3.3],", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "complex_args_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def complex_args_function(model: OuterModel) -> str:\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}\"\ndef test_nested_data_function():\n func_schema = 
function_schema(complex_args_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_function_args\"\n # Valid input\n model = OuterModel(inner=InnerModel(a=1, b=\"hello\"), foo=Foo(a=2, b=\"world\"))\n valid_input = {\n \"model\": model.model_dump(),", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_nested_data_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_nested_data_function():\n func_schema = function_schema(complex_args_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_function_args\"\n # Valid input\n model = OuterModel(inner=InnerModel(a=1, b=\"hello\"), foo=Foo(a=2, b=\"world\"))\n valid_input = {\n \"model\": model.model_dump(),\n }\n parsed = func_schema.params_pydantic_model(**valid_input)", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "complex_args_and_docs_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def complex_args_and_docs_function(model: OuterModel, some_flag: int = 0) -> str:\n \"\"\"\n This function takes a model and a flag, and returns a string.\n Args:\n model: A model with an inner and foo field\n some_flag: An optional flag with a default of 0\n Returns:\n A string with the values of the model and flag\n \"\"\"\n return f\"{model.inner.a}, {model.inner.b}, {model.foo['a']}, {model.foo['b']}, {some_flag or 0}\"", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_complex_args_and_docs_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_complex_args_and_docs_function():\n func_schema = function_schema(complex_args_and_docs_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"complex_args_and_docs_function_args\"\n # Check docstring is parsed correctly\n properties = func_schema.params_json_schema.get(\"properties\", {})\n assert properties.get(\"model\").get(\"description\") == \"A model with an inner and foo field\"\n assert properties.get(\"some_flag\").get(\"description\") == \"An optional flag with a default of 0\"\n # Valid input\n model = OuterModel(inner=InnerModel(a=1, b=\"hello\"), foo=Foo(a=2, b=\"world\"))", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "function_with_context", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def function_with_context(ctx: RunContextWrapper[str], a: int, b: int = 5):\n return a + b\ndef test_function_with_context():\n func_schema = function_schema(function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n input = {\"a\": 1, \"b\": 2}\n parsed = func_schema.params_pydantic_model(**input)\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = function_with_context(context, *args, **kwargs_dict)", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_context", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - 
"peekOfCode": "def test_function_with_context():\n func_schema = function_schema(function_with_context)\n assert func_schema.takes_context\n context = RunContextWrapper(context=\"test\")\n input = {\"a\": 1, \"b\": 2}\n parsed = func_schema.params_pydantic_model(**input)\n args, kwargs_dict = func_schema.to_call_args(parsed)\n result = function_with_context(context, *args, **kwargs_dict)\n assert result == 3\nclass MyClass:", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_class_based_functions", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_class_based_functions():\n context = RunContextWrapper(context=\"test\")\n # Instance method\n instance = MyClass()\n func_schema = function_schema(instance.foo)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"foo_args\"\n input = {\"a\": 1, \"b\": 2}\n parsed = func_schema.params_pydantic_model(**input)\n args, kwargs_dict = func_schema.to_call_args(parsed)", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "enum_and_literal_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def enum_and_literal_function(a: MyEnum, b: Literal[\"a\", \"b\", \"c\"]) -> str:\n return f\"{a.value} {b}\"\ndef test_enum_and_literal_function():\n func_schema = function_schema(enum_and_literal_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"enum_and_literal_function_args\"\n # Check that the enum values are included in the JSON schema\n assert func_schema.params_json_schema.get(\"$defs\", {}).get(\"MyEnum\", {}).get(\"enum\") == [\n \"foo\",\n \"bar\",", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_enum_and_literal_function", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_enum_and_literal_function():\n func_schema = function_schema(enum_and_literal_function)\n assert isinstance(func_schema.params_json_schema, dict)\n assert func_schema.params_json_schema.get(\"title\") == \"enum_and_literal_function_args\"\n # Check that the enum values are included in the JSON schema\n assert func_schema.params_json_schema.get(\"$defs\", {}).get(\"MyEnum\", {}).get(\"enum\") == [\n \"foo\",\n \"bar\",\n \"baz\",\n ]", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_run_context_in_non_first_position_raises_value_error", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_run_context_in_non_first_position_raises_value_error():\n # When a parameter (after the first) is annotated as RunContextWrapper,\n # function_schema() should raise a UserError.\n def func(a: int, context: RunContextWrapper) -> None:\n pass\n with pytest.raises(UserError):\n function_schema(func, use_docstring_info=False)\ndef test_var_positional_tuple_annotation():\n # When a function has a var-positional parameter annotated with a tuple type,\n # function_schema() should convert it into a field with type List[].", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_var_positional_tuple_annotation", - "kind": 2, - "importPath": 
"tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_var_positional_tuple_annotation():\n # When a function has a var-positional parameter annotated with a tuple type,\n # function_schema() should convert it into a field with type List[].\n def func(*args: tuple[int, ...]) -> int:\n total = 0\n for arg in args:\n total += sum(arg)\n return total\n fs = function_schema(func, use_docstring_info=False)\n properties = fs.params_json_schema.get(\"properties\", {})", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_var_keyword_dict_annotation", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_var_keyword_dict_annotation():\n # Case 3:\n # When a function has a var-keyword parameter annotated with a dict type,\n # function_schema() should convert it into a field with type Dict[, ].\n def func(**kwargs: dict[str, int]):\n return kwargs\n fs = function_schema(func, use_docstring_info=False, strict_json_schema=False)\n properties = fs.params_json_schema.get(\"properties\", {})\n # The name of the field is \"kwargs\", and it's a JSON object i.e. a dict.\n assert properties.get(\"kwargs\").get(\"type\") == \"object\"", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_schema_with_mapping_raises_strict_mode_error", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_schema_with_mapping_raises_strict_mode_error():\n \"\"\"A mapping type is not allowed in strict mode. Same for dicts. Ensure we raise a UserError.\"\"\"\n def func_with_mapping(test_one: Mapping[str, int]) -> str:\n return \"foo\"\n with pytest.raises(UserError):\n function_schema(func_with_mapping)\ndef test_name_override_without_docstring() -> None:\n \"\"\"name_override should be used even when not parsing docstrings.\"\"\"\n def foo(x: int) -> int:\n return x", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_name_override_without_docstring", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_name_override_without_docstring() -> None:\n \"\"\"name_override should be used even when not parsing docstrings.\"\"\"\n def foo(x: int) -> int:\n return x\n fs = function_schema(foo, use_docstring_info=False, name_override=\"custom\")\n assert fs.name == \"custom\"\n assert fs.params_json_schema.get(\"title\") == \"custom_args\"\ndef test_function_with_field_required_constraints():\n \"\"\"Test function with required Field parameter that has constraints.\"\"\"\n def func_with_field_constraints(my_number: int = Field(..., gt=10, le=100)) -> int:", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_field_required_constraints", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_function_with_field_required_constraints():\n \"\"\"Test function with required Field parameter that has constraints.\"\"\"\n def func_with_field_constraints(my_number: int = Field(..., gt=10, le=100)) -> int:\n return my_number * 2\n fs = function_schema(func_with_field_constraints, use_docstring_info=False)\n # Check that the schema includes the constraints\n properties = fs.params_json_schema.get(\"properties\", {})\n 
my_number_schema = properties.get(\"my_number\", {})\n assert my_number_schema.get(\"type\") == \"integer\"\n assert my_number_schema.get(\"exclusiveMinimum\") == 10 # gt=10", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_field_optional_with_default", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_function_with_field_optional_with_default():\n \"\"\"Test function with optional Field parameter that has default and constraints.\"\"\"\n def func_with_optional_field(\n required_param: str,\n optional_param: float = Field(default=5.0, ge=0.0),\n ) -> str:\n return f\"{required_param}: {optional_param}\"\n fs = function_schema(func_with_optional_field, use_docstring_info=False)\n # Check that the schema includes the constraints and description\n properties = fs.params_json_schema.get(\"properties\", {})", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_field_description_merge", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_function_with_field_description_merge():\n \"\"\"Test that Field descriptions are merged with docstring descriptions.\"\"\"\n def func_with_field_and_docstring(\n param_with_field_desc: int = Field(..., description=\"Field description\"),\n param_with_both: str = Field(default=\"hello\", description=\"Field description\"),\n ) -> str:\n \"\"\"\n Function with both field and docstring descriptions.\n Args:\n param_with_field_desc: Docstring description", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "func_with_field_desc_only", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def func_with_field_desc_only(\n param_with_field_desc: int = Field(..., description=\"Field description only\"),\n param_without_desc: str = Field(default=\"hello\"),\n) -> str:\n return f\"{param_with_field_desc}: {param_without_desc}\"\ndef test_function_with_field_description_only():\n \"\"\"Test that Field descriptions are used when no docstring info.\"\"\"\n fs = function_schema(func_with_field_desc_only)\n # Check that field description is used when no docstring\n properties = fs.params_json_schema.get(\"properties\", {})", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_field_description_only", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_function_with_field_description_only():\n \"\"\"Test that Field descriptions are used when no docstring info.\"\"\"\n fs = function_schema(func_with_field_desc_only)\n # Check that field description is used when no docstring\n properties = fs.params_json_schema.get(\"properties\", {})\n param1_schema = properties.get(\"param_with_field_desc\", {})\n param2_schema = properties.get(\"param_without_desc\", {})\n assert param1_schema.get(\"description\") == \"Field description only\"\n assert param2_schema.get(\"description\") is None\ndef test_function_with_field_string_constraints():", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_field_string_constraints", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": 
"def test_function_with_field_string_constraints():\n \"\"\"Test function with Field parameter that has string-specific constraints.\"\"\"\n def func_with_string_field(\n name: str = Field(..., min_length=3, max_length=20, pattern=r\"^[A-Za-z]+$\"),\n ) -> str:\n return f\"Hello, {name}!\"\n fs = function_schema(func_with_string_field, use_docstring_info=False)\n # Check that the schema includes string constraints\n properties = fs.params_json_schema.get(\"properties\", {})\n name_schema = properties.get(\"name\", {})", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "test_function_with_field_multiple_constraints", - "kind": 2, - "importPath": "tests.test_function_schema", - "description": "tests.test_function_schema", - "peekOfCode": "def test_function_with_field_multiple_constraints():\n \"\"\"Test function with multiple Field parameters having different constraint types.\"\"\"\n def func_with_multiple_field_constraints(\n score: int = Field(..., ge=0, le=100, description=\"Score from 0 to 100\"),\n name: str = Field(default=\"Unknown\", min_length=1, max_length=50),\n factor: float = Field(default=1.0, gt=0.0, description=\"Positive multiplier\"),\n ) -> str:\n final_score = score * factor\n return f\"{name} scored {final_score}\"\n fs = function_schema(func_with_multiple_field_constraints, use_docstring_info=False)", - "detail": "tests.test_function_schema", - "documentation": {} - }, - { - "label": "Foo", - "kind": 6, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "class Foo(BaseModel):\n a: int\n b: int = 5\nclass Bar(TypedDict):\n x: str\n y: int\ndef complex_args_function(foo: Foo, bar: Bar, baz: str = \"hello\"):\n return f\"{foo.a + foo.b} {bar['x']}{bar['y']} {baz}\"\n@pytest.mark.asyncio\nasync def test_complex_args_function():", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "Bar", - "kind": 6, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "class Bar(TypedDict):\n x: str\n y: int\ndef complex_args_function(foo: Foo, bar: Bar, baz: str = \"hello\"):\n return f\"{foo.a + foo.b} {bar['x']}{bar['y']} {baz}\"\n@pytest.mark.asyncio\nasync def test_complex_args_function():\n tool = function_tool(complex_args_function, failure_error_function=None)\n assert tool.name == \"complex_args_function\"\n valid_json = json.dumps(", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "BoolCtx", - "kind": 6, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "class BoolCtx(BaseModel):\n enable_tools: bool\n@pytest.mark.asyncio\nasync def test_is_enabled_bool_and_callable():\n @function_tool(is_enabled=False)\n def disabled_tool():\n return \"nope\"\n async def cond_enabled(ctx: RunContextWrapper[BoolCtx], agent: AgentBase) -> bool:\n return ctx.context.enable_tools\n @function_tool(is_enabled=cond_enabled)", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "argless_function", - "kind": 2, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "def argless_function() -> str:\n return \"ok\"\n@pytest.mark.asyncio\nasync def test_argless_function():\n tool = function_tool(argless_function)\n assert tool.name == \"argless_function\"\n result = await tool.on_invoke_tool(\n ToolContext(context=None, tool_name=tool.name, tool_call_id=\"1\"), \"\"\n )\n 
assert result == \"ok\"", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "argless_with_context", - "kind": 2, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "def argless_with_context(ctx: ToolContext[str]) -> str:\n return \"ok\"\n@pytest.mark.asyncio\nasync def test_argless_with_context():\n tool = function_tool(argless_with_context)\n assert tool.name == \"argless_with_context\"\n result = await tool.on_invoke_tool(ToolContext(None, tool_name=tool.name, tool_call_id=\"1\"), \"\")\n assert result == \"ok\"\n # Extra JSON should not raise an error\n result = await tool.on_invoke_tool(", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "simple_function", - "kind": 2, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "def simple_function(a: int, b: int = 5):\n return a + b\n@pytest.mark.asyncio\nasync def test_simple_function():\n tool = function_tool(simple_function, failure_error_function=None)\n assert tool.name == \"simple_function\"\n result = await tool.on_invoke_tool(\n ToolContext(None, tool_name=tool.name, tool_call_id=\"1\"), '{\"a\": 1}'\n )\n assert result == 6", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "complex_args_function", - "kind": 2, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "def complex_args_function(foo: Foo, bar: Bar, baz: str = \"hello\"):\n return f\"{foo.a + foo.b} {bar['x']}{bar['y']} {baz}\"\n@pytest.mark.asyncio\nasync def test_complex_args_function():\n tool = function_tool(complex_args_function, failure_error_function=None)\n assert tool.name == \"complex_args_function\"\n valid_json = json.dumps(\n {\n \"foo\": Foo(a=1).model_dump(),\n \"bar\": Bar(x=\"hello\", y=10),", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "test_function_config_overrides", - "kind": 2, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "def test_function_config_overrides():\n tool = function_tool(simple_function, name_override=\"custom_name\")\n assert tool.name == \"custom_name\"\n tool = function_tool(simple_function, description_override=\"custom description\")\n assert tool.description == \"custom description\"\n tool = function_tool(\n simple_function,\n name_override=\"custom_name\",\n description_override=\"custom description\",\n )", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "test_func_schema_is_strict", - "kind": 2, - "importPath": "tests.test_function_tool", - "description": "tests.test_function_tool", - "peekOfCode": "def test_func_schema_is_strict():\n tool = function_tool(simple_function)\n assert tool.strict_json_schema, \"Should be strict by default\"\n assert (\n \"additionalProperties\" in tool.params_json_schema\n and not tool.params_json_schema[\"additionalProperties\"]\n )\n tool = function_tool(complex_args_function)\n assert tool.strict_json_schema, \"Should be strict by default\"\n assert (", - "detail": "tests.test_function_tool", - "documentation": {} - }, - { - "label": "DummyContext", - "kind": 6, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "class DummyContext:\n def __init__(self):\n self.data = \"something\"\ndef ctx_wrapper() -> ToolContext[DummyContext]:\n return 
ToolContext(context=DummyContext(), tool_name=\"dummy\", tool_call_id=\"1\")\n@function_tool\ndef sync_no_context_no_args() -> str:\n return \"test_1\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_no_args_invocation():", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "ctx_wrapper", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def ctx_wrapper() -> ToolContext[DummyContext]:\n return ToolContext(context=DummyContext(), tool_name=\"dummy\", tool_call_id=\"1\")\n@function_tool\ndef sync_no_context_no_args() -> str:\n return \"test_1\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_no_args_invocation():\n tool = sync_no_context_no_args\n output = await tool.on_invoke_tool(ctx_wrapper(), \"\")\n assert output == \"test_1\"", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "sync_no_context_no_args", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def sync_no_context_no_args() -> str:\n return \"test_1\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_no_args_invocation():\n tool = sync_no_context_no_args\n output = await tool.on_invoke_tool(ctx_wrapper(), \"\")\n assert output == \"test_1\"\n@function_tool\ndef sync_no_context_with_args(a: int, b: int) -> int:\n return a + b", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "sync_no_context_with_args", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def sync_no_context_with_args(a: int, b: int) -> int:\n return a + b\n@pytest.mark.asyncio\nasync def test_sync_no_context_with_args_invocation():\n tool = sync_no_context_with_args\n input_data = {\"a\": 5, \"b\": 7}\n output = await tool.on_invoke_tool(ctx_wrapper(), json.dumps(input_data))\n assert int(output) == 12\n@function_tool\ndef sync_with_context(ctx: ToolContext[DummyContext], name: str) -> str:", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "sync_with_context", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def sync_with_context(ctx: ToolContext[DummyContext], name: str) -> str:\n return f\"{name}_{ctx.context.data}\"\n@pytest.mark.asyncio\nasync def test_sync_with_context_invocation():\n tool = sync_with_context\n input_data = {\"name\": \"Alice\"}\n output = await tool.on_invoke_tool(ctx_wrapper(), json.dumps(input_data))\n assert output == \"Alice_something\"\n@function_tool\nasync def async_no_context(a: int, b: int) -> int:", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "sync_no_context_override", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def sync_no_context_override() -> str:\n return \"override_result\"\n@pytest.mark.asyncio\nasync def test_sync_no_context_override_invocation():\n tool = sync_no_context_override\n assert tool.name == \"my_custom_tool\"\n assert tool.description == \"custom desc\"\n output = await tool.on_invoke_tool(ctx_wrapper(), \"\")\n assert output == \"override_result\"\n@function_tool(failure_error_function=None)", - "detail": 
"tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "will_fail_on_bad_json", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def will_fail_on_bad_json(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_error_on_invalid_json():\n tool = will_fail_on_bad_json\n # Passing an invalid JSON string\n with pytest.raises(Exception) as exc_info:\n await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert \"Invalid JSON input for tool\" in str(exc_info.value)\ndef sync_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "sync_error_handler", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def sync_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:\n return f\"error_{error.__class__.__name__}\"\n@function_tool(failure_error_function=sync_error_handler)\ndef will_not_fail_on_bad_json(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json():\n tool = will_not_fail_on_bad_json\n # Passing an invalid JSON string\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "will_not_fail_on_bad_json", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def will_not_fail_on_bad_json(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json():\n tool = will_not_fail_on_bad_json\n # Passing an invalid JSON string\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert result == \"error_ModelBehaviorError\"\ndef async_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:\n return f\"error_{error.__class__.__name__}\"", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "async_error_handler", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def async_error_handler(ctx: RunContextWrapper[Any], error: Exception) -> str:\n return f\"error_{error.__class__.__name__}\"\n@function_tool(failure_error_function=sync_error_handler)\ndef will_not_fail_on_bad_json_async(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json_async():\n tool = will_not_fail_on_bad_json_async\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert result == \"error_ModelBehaviorError\"", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "will_not_fail_on_bad_json_async", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def will_not_fail_on_bad_json_async(x: int) -> int:\n return x * 2 # pragma: no cover\n@pytest.mark.asyncio\nasync def test_no_error_on_invalid_json_async():\n tool = will_not_fail_on_bad_json_async\n result = await tool.on_invoke_tool(ctx_wrapper(), \"{not valid json}\")\n assert result == \"error_ModelBehaviorError\"\n@function_tool(strict_mode=False)\ndef 
optional_param_function(a: int, b: Optional[int] = None) -> str:\n if b is None:", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "optional_param_function", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def optional_param_function(a: int, b: Optional[int] = None) -> str:\n if b is None:\n return f\"{a}_no_b\"\n return f\"{a}_{b}\"\n@pytest.mark.asyncio\nasync def test_non_strict_mode_function():\n tool = optional_param_function\n assert tool.strict_json_schema is False, \"strict_json_schema should be False\"\n assert tool.params_json_schema.get(\"required\") == [\"a\"], \"required should only be a\"\n input_data = {\"a\": 5}", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "all_optional_params_function", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def all_optional_params_function(\n x: int = 42,\n y: str = \"hello\",\n z: Optional[int] = None,\n) -> str:\n if z is None:\n return f\"{x}_{y}_no_z\"\n return f\"{x}_{y}_{z}\"\n@pytest.mark.asyncio\nasync def test_all_optional_params_function():", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "get_weather", - "kind": 2, - "importPath": "tests.test_function_tool_decorator", - "description": "tests.test_function_tool_decorator", - "peekOfCode": "def get_weather(city: str) -> str:\n \"\"\"Get the weather for a given city.\n Args:\n city: The city to get the weather for.\n \"\"\"\n return f\"The weather in {city} is sunny.\"\n@pytest.mark.asyncio\nasync def test_extract_descriptions_from_docstring():\n \"\"\"Ensure that we extract function and param descriptions from docstrings.\"\"\"\n tool = get_weather", - "detail": "tests.test_function_tool_decorator", - "documentation": {} - }, - { - "label": "RunHooksForTests", - "kind": 6, - "importPath": "tests.test_global_hooks", - "description": "tests.test_global_hooks", - "peekOfCode": "class RunHooksForTests(RunHooks):\n def __init__(self):\n self.events: dict[str, int] = defaultdict(int)\n def reset(self):\n self.events.clear()\n async def on_agent_start(\n self, context: RunContextWrapper[TContext], agent: Agent[TContext]\n ) -> None:\n self.events[\"on_agent_start\"] += 1\n async def on_agent_end(", - "detail": "tests.test_global_hooks", - "documentation": {} - }, - { - "label": "Foo", - "kind": 6, - "importPath": "tests.test_global_hooks", - "description": "tests.test_global_hooks", - "peekOfCode": "class Foo(TypedDict):\n a: str\n@pytest.mark.asyncio\nasync def test_structured_output_non_streamed_agent_hooks():\n hooks = RunHooksForTests()\n model = FakeModel()\n agent_1 = Agent(name=\"test_1\", model=model)\n agent_2 = Agent(name=\"test_2\", model=model)\n agent_3 = Agent(\n name=\"test_3\",", - "detail": "tests.test_global_hooks", - "documentation": {} - }, - { - "label": "get_sync_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def get_sync_guardrail(triggers: bool, output_info: Any | None = None):\n def sync_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n ):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return sync_guardrail\n@pytest.mark.asyncio", - "detail": "tests.test_guardrails", - 
"documentation": {} - }, - { - "label": "get_async_input_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def get_async_input_guardrail(triggers: bool, output_info: Any | None = None):\n async def async_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n ):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return async_guardrail\n@pytest.mark.asyncio", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "get_sync_output_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def get_sync_output_guardrail(triggers: bool, output_info: Any | None = None):\n def sync_guardrail(context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return sync_guardrail\n@pytest.mark.asyncio\nasync def test_sync_output_guardrail():\n guardrail = OutputGuardrail(guardrail_function=get_sync_output_guardrail(triggers=False))", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "get_async_output_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def get_async_output_guardrail(triggers: bool, output_info: Any | None = None):\n async def async_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n ):\n return GuardrailFunctionOutput(\n output_info=output_info,\n tripwire_triggered=triggers,\n )\n return async_guardrail\n@pytest.mark.asyncio", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "decorated_input_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def decorated_input_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_1\",\n tripwire_triggered=False,\n )\n@input_guardrail(name=\"Custom name\")\ndef decorated_named_input_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "decorated_named_input_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def decorated_named_input_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], input: str | list[TResponseInputItem]\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_2\",\n tripwire_triggered=False,\n )\n@pytest.mark.asyncio\nasync def test_input_guardrail_decorators():\n guardrail = decorated_input_guardrail", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "decorated_output_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def decorated_output_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_3\",\n tripwire_triggered=False,\n )\n@output_guardrail(name=\"Custom name\")\ndef decorated_named_output_guardrail(\n context: RunContextWrapper[Any], agent: 
Agent[Any], agent_output: Any", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "decorated_named_output_guardrail", - "kind": 2, - "importPath": "tests.test_guardrails", - "description": "tests.test_guardrails", - "peekOfCode": "def decorated_named_output_guardrail(\n context: RunContextWrapper[Any], agent: Agent[Any], agent_output: Any\n) -> GuardrailFunctionOutput:\n return GuardrailFunctionOutput(\n output_info=\"test_4\",\n tripwire_triggered=False,\n )\n@pytest.mark.asyncio\nasync def test_output_guardrail_decorators():\n guardrail = decorated_output_guardrail", - "detail": "tests.test_guardrails", - "documentation": {} - }, - { - "label": "Foo", - "kind": 6, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_handoff_input_type():\n async def _on_handoff(ctx: RunContextWrapper[Any], input: Foo):\n pass\n agent = Agent(name=\"test\")\n obj = handoff(agent, input_type=Foo, on_handoff=_on_handoff)\n for key, value in Foo.model_json_schema().items():\n assert obj.input_json_schema[key] == value", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "message_item", - "kind": 2, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "def message_item(content: str, agent: Agent[Any]) -> MessageOutputItem:\n return MessageOutputItem(\n agent=agent,\n raw_item=ResponseOutputMessage(\n id=\"123\",\n status=\"completed\",\n role=\"assistant\",\n type=\"message\",\n content=[ResponseOutputText(text=content, type=\"output_text\", annotations=[])],\n ),", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "get_len", - "kind": 2, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "def get_len(data: HandoffInputData) -> int:\n input_len = len(data.input_history) if isinstance(data.input_history, tuple) else 1\n pre_handoff_len = len(data.pre_handoff_items)\n new_items_len = len(data.new_items)\n return input_len + pre_handoff_len + new_items_len\n@pytest.mark.asyncio\nasync def test_single_handoff_setup():\n agent_1 = Agent(name=\"test_1\")\n agent_2 = Agent(name=\"test_2\", handoffs=[agent_1])\n assert not agent_1.handoffs", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "test_handoff_input_data", - "kind": 2, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "def test_handoff_input_data():\n agent = Agent(name=\"test\")\n data = HandoffInputData(\n input_history=\"\",\n pre_handoff_items=(),\n new_items=(),\n )\n assert get_len(data) == 1\n data = HandoffInputData(\n input_history=({\"role\": \"user\", \"content\": \"foo\"},),", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "test_handoff_input_schema_is_strict", - "kind": 2, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "def test_handoff_input_schema_is_strict():\n agent = Agent(name=\"test\")\n obj = handoff(agent, input_type=Foo, on_handoff=lambda ctx, input: None)\n for key, value in Foo.model_json_schema().items():\n assert obj.input_json_schema[key] == value\n assert obj.strict_json_schema, \"Input schema should be strict\"\n assert (\n \"additionalProperties\" in obj.input_json_schema\n and not obj.input_json_schema[\"additionalProperties\"]\n ), \"Input 
schema should be strict and have additionalProperties=False\"", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "test_get_transfer_message_is_valid_json", - "kind": 2, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "def test_get_transfer_message_is_valid_json() -> None:\n agent = Agent(name=\"foo\")\n obj = handoff(agent)\n transfer = obj.get_transfer_message(agent)\n assert json.loads(transfer) == {\"assistant\": agent.name}\ndef test_handoff_is_enabled_bool():\n \"\"\"Test that handoff respects is_enabled boolean parameter.\"\"\"\n agent = Agent(name=\"test\")\n # Test enabled handoff (default)\n handoff_enabled = handoff(agent)", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "test_handoff_is_enabled_bool", - "kind": 2, - "importPath": "tests.test_handoff_tool", - "description": "tests.test_handoff_tool", - "peekOfCode": "def test_handoff_is_enabled_bool():\n \"\"\"Test that handoff respects is_enabled boolean parameter.\"\"\"\n agent = Agent(name=\"test\")\n # Test enabled handoff (default)\n handoff_enabled = handoff(agent)\n assert handoff_enabled.is_enabled is True\n # Test explicitly enabled handoff\n handoff_explicit_enabled = handoff(agent, is_enabled=True)\n assert handoff_explicit_enabled.is_enabled is True\n # Test disabled handoff", - "detail": "tests.test_handoff_tool", - "documentation": {} - }, - { - "label": "make_message", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def make_message(\n content_items: list[ResponseOutputText | ResponseOutputRefusal],\n) -> ResponseOutputMessage:\n \"\"\"\n Helper to construct a ResponseOutputMessage with a single batch of content\n items, using a fixed id/status.\n \"\"\"\n return ResponseOutputMessage(\n id=\"msg123\",\n content=content_items,", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_extract_last_content_of_text_message", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_extract_last_content_of_text_message() -> None:\n # Build a message containing two text segments.\n content1 = ResponseOutputText(annotations=[], text=\"Hello \", type=\"output_text\")\n content2 = ResponseOutputText(annotations=[], text=\"world!\", type=\"output_text\")\n message = make_message([content1, content2])\n # Helpers should yield the last segment's text.\n assert ItemHelpers.extract_last_content(message) == \"world!\"\ndef test_extract_last_content_of_refusal_message() -> None:\n # Build a message whose last content entry is a refusal.\n content1 = ResponseOutputText(annotations=[], text=\"Before refusal\", type=\"output_text\")", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_extract_last_content_of_refusal_message", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_extract_last_content_of_refusal_message() -> None:\n # Build a message whose last content entry is a refusal.\n content1 = ResponseOutputText(annotations=[], text=\"Before refusal\", type=\"output_text\")\n refusal = ResponseOutputRefusal(refusal=\"I cannot do that\", type=\"refusal\")\n message = make_message([content1, refusal])\n # Helpers should extract the refusal string when last content is a refusal.\n assert ItemHelpers.extract_last_content(message) == 
\"I cannot do that\"\ndef test_extract_last_content_non_message_returns_empty() -> None:\n # Construct some other type of output item, e.g. a tool call, to verify non-message returns \"\".\n tool_call = ResponseFunctionToolCall(", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_extract_last_content_non_message_returns_empty", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_extract_last_content_non_message_returns_empty() -> None:\n # Construct some other type of output item, e.g. a tool call, to verify non-message returns \"\".\n tool_call = ResponseFunctionToolCall(\n id=\"tool123\",\n arguments=\"{}\",\n call_id=\"call123\",\n name=\"func\",\n type=\"function_call\",\n )\n assert ItemHelpers.extract_last_content(tool_call) == \"\"", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_extract_last_text_returns_text_only", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_extract_last_text_returns_text_only() -> None:\n # A message whose last segment is text yields the text.\n first_text = ResponseOutputText(annotations=[], text=\"part1\", type=\"output_text\")\n second_text = ResponseOutputText(annotations=[], text=\"part2\", type=\"output_text\")\n message = make_message([first_text, second_text])\n assert ItemHelpers.extract_last_text(message) == \"part2\"\n # Whereas when last content is a refusal, extract_last_text returns None.\n message2 = make_message([first_text, ResponseOutputRefusal(refusal=\"no\", type=\"refusal\")])\n assert ItemHelpers.extract_last_text(message2) is None\ndef test_input_to_new_input_list_from_string() -> None:", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_input_to_new_input_list_from_string", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_input_to_new_input_list_from_string() -> None:\n result = ItemHelpers.input_to_new_input_list(\"hi\")\n # Should wrap the string into a list with a single dict containing content and user role.\n assert isinstance(result, list)\n assert result == [{\"content\": \"hi\", \"role\": \"user\"}]\ndef test_input_to_new_input_list_deep_copies_lists() -> None:\n # Given a list of message dictionaries, ensure the returned list is a deep copy.\n original: list[TResponseInputItem] = [{\"content\": \"abc\", \"role\": \"developer\"}]\n new_list = ItemHelpers.input_to_new_input_list(original)\n assert new_list == original", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_input_to_new_input_list_deep_copies_lists", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_input_to_new_input_list_deep_copies_lists() -> None:\n # Given a list of message dictionaries, ensure the returned list is a deep copy.\n original: list[TResponseInputItem] = [{\"content\": \"abc\", \"role\": \"developer\"}]\n new_list = ItemHelpers.input_to_new_input_list(original)\n assert new_list == original\n # Mutating the returned list should not mutate the original.\n new_list.pop()\n assert \"content\" in original[0] and original[0].get(\"content\") == \"abc\"\ndef test_text_message_output_concatenates_text_segments() -> None:\n # Build a message with both text and refusal segments, only text segments 
are concatenated.", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_text_message_output_concatenates_text_segments", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_text_message_output_concatenates_text_segments() -> None:\n # Build a message with both text and refusal segments, only text segments are concatenated.\n pieces: list[ResponseOutputText | ResponseOutputRefusal] = []\n pieces.append(ResponseOutputText(annotations=[], text=\"a\", type=\"output_text\"))\n pieces.append(ResponseOutputRefusal(refusal=\"denied\", type=\"refusal\"))\n pieces.append(ResponseOutputText(annotations=[], text=\"b\", type=\"output_text\"))\n message = make_message(pieces)\n # Wrap into MessageOutputItem to feed into text_message_output.\n item = MessageOutputItem(agent=Agent(name=\"test\"), raw_item=message)\n assert ItemHelpers.text_message_output(item) == \"ab\"", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_text_message_outputs_across_list_of_runitems", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_text_message_outputs_across_list_of_runitems() -> None:\n \"\"\"\n Compose several RunItem instances, including a non-message run item, and ensure\n that only MessageOutputItem instances contribute any text. The non-message\n (ReasoningItem) should be ignored by Helpers.text_message_outputs.\n \"\"\"\n message1 = make_message([ResponseOutputText(annotations=[], text=\"foo\", type=\"output_text\")])\n message2 = make_message([ResponseOutputText(annotations=[], text=\"bar\", type=\"output_text\")])\n item1: RunItem = MessageOutputItem(agent=Agent(name=\"test\"), raw_item=message1)\n item2: RunItem = MessageOutputItem(agent=Agent(name=\"test\"), raw_item=message2)", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_tool_call_output_item_constructs_function_call_output_dict", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_tool_call_output_item_constructs_function_call_output_dict():\n # Build a simple ResponseFunctionToolCall.\n call = ResponseFunctionToolCall(\n id=\"call-abc\",\n arguments='{\"x\": 1}',\n call_id=\"call-abc\",\n name=\"do_something\",\n type=\"function_call\",\n )\n payload = ItemHelpers.tool_call_output_item(call, \"result-string\")", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_to_input_items_for_message", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_to_input_items_for_message() -> None:\n \"\"\"An output message should convert into an input dict matching the message's own structure.\"\"\"\n content = ResponseOutputText(annotations=[], text=\"hello world\", type=\"output_text\")\n message = ResponseOutputMessage(\n id=\"m1\", content=[content], role=\"assistant\", status=\"completed\", type=\"message\"\n )\n resp = ModelResponse(output=[message], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n # The dict should contain exactly the primitive values of the message", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_to_input_items_for_function_call", - "kind": 2, - "importPath": 
"tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_to_input_items_for_function_call() -> None:\n \"\"\"A function tool call output should produce the same dict as a function tool call input.\"\"\"\n tool_call = ResponseFunctionToolCall(\n id=\"f1\", arguments=\"{}\", call_id=\"c1\", name=\"func\", type=\"function_call\"\n )\n resp = ModelResponse(output=[tool_call], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n expected: ResponseFunctionToolCallParam = {\n \"id\": \"f1\",", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_to_input_items_for_file_search_call", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_to_input_items_for_file_search_call() -> None:\n \"\"\"A file search tool call output should produce the same dict as a file search input.\"\"\"\n fs_call = ResponseFileSearchToolCall(\n id=\"fs1\", queries=[\"query\"], status=\"completed\", type=\"file_search_call\"\n )\n resp = ModelResponse(output=[fs_call], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n expected: ResponseFileSearchToolCallParam = {\n \"id\": \"fs1\",", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_to_input_items_for_web_search_call", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_to_input_items_for_web_search_call() -> None:\n \"\"\"A web search tool call output should produce the same dict as a web search input.\"\"\"\n ws_call = ResponseFunctionWebSearch(\n id=\"w1\",\n action=ActionSearch(type=\"search\", query=\"query\"),\n status=\"completed\",\n type=\"web_search_call\",\n )\n resp = ModelResponse(output=[ws_call], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_to_input_items_for_computer_call_click", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_to_input_items_for_computer_call_click() -> None:\n \"\"\"A computer call output should yield a dict whose shape matches the computer call input.\"\"\"\n action = ActionScreenshot(type=\"screenshot\")\n comp_call = ResponseComputerToolCall(\n id=\"comp1\",\n action=action,\n type=\"computer_call\",\n call_id=\"comp1\",\n pending_safety_checks=[],\n status=\"completed\",", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - { - "label": "test_to_input_items_for_reasoning", - "kind": 2, - "importPath": "tests.test_items_helpers", - "description": "tests.test_items_helpers", - "peekOfCode": "def test_to_input_items_for_reasoning() -> None:\n \"\"\"A reasoning output should produce the same dict as a reasoning input item.\"\"\"\n rc = Summary(text=\"why\", type=\"summary_text\")\n reasoning = ResponseReasoningItem(id=\"rid1\", summary=[rc], type=\"reasoning\")\n resp = ModelResponse(output=[reasoning], usage=Usage(), response_id=None)\n input_items = resp.to_input_items()\n assert isinstance(input_items, list) and len(input_items) == 1\n converted_dict = input_items[0]\n expected: ResponseReasoningItemParam = {\n \"id\": \"rid1\",", - "detail": "tests.test_items_helpers", - "documentation": {} - }, - 
{ - "label": "Foo", - "kind": 6, - "importPath": "tests.test_max_turns", - "description": "tests.test_max_turns", - "peekOfCode": "class Foo(TypedDict):\n a: str\n@pytest.mark.asyncio\nasync def test_structured_output_non_streamed_max_turns():\n model = FakeModel()\n agent = Agent(\n name=\"test_1\",\n model=model,\n output_type=Foo,\n tools=[get_function_tool(\"tool_1\", \"result\")],", - "detail": "tests.test_max_turns", - "documentation": {} - }, - { - "label": "test_store_param", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions", - "description": "tests.test_openai_chatcompletions", - "peekOfCode": "def test_store_param():\n \"\"\"Should default to True for OpenAI API calls, and False otherwise.\"\"\"\n model_settings = ModelSettings()\n client = AsyncOpenAI()\n assert ChatCmplHelpers.get_store_param(client, model_settings) is True, (\n \"Should default to True for OpenAI API calls\"\n )\n model_settings = ModelSettings(store=False)\n assert ChatCmplHelpers.get_store_param(client, model_settings) is False, (\n \"Should respect explicitly set store=False\"", - "detail": "tests.test_openai_chatcompletions", - "documentation": {} - }, - { - "label": "TestObject", - "kind": 6, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "class TestObject:\n pass\ndef test_unknown_object_errors():\n \"\"\"\n Test that unknown objects are converted correctly.\n \"\"\"\n with pytest.raises(UserError, match=\"Unhandled item type or structure\"):\n # Purposely ignore the type error\n Converter.items_to_messages([TestObject()]) # type: ignore\ndef test_assistant_messages_in_history():", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_message_to_output_items_with_text_only", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_message_to_output_items_with_text_only():\n \"\"\"\n Make sure a simple ChatCompletionMessage with string content is converted\n into a single ResponseOutputMessage containing one ResponseOutputText.\n \"\"\"\n msg = ChatCompletionMessage(role=\"assistant\", content=\"Hello\")\n items = Converter.message_to_output_items(msg)\n # Expect exactly one output item (the message)\n assert len(items) == 1\n message_item = cast(ResponseOutputMessage, items[0])", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_message_to_output_items_with_refusal", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_message_to_output_items_with_refusal():\n \"\"\"\n Make sure a message with a refusal string produces a ResponseOutputMessage\n with a ResponseOutputRefusal content part.\n \"\"\"\n msg = ChatCompletionMessage(role=\"assistant\", refusal=\"I'm sorry\")\n items = Converter.message_to_output_items(msg)\n assert len(items) == 1\n message_item = cast(ResponseOutputMessage, items[0])\n assert len(message_item.content) == 1", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_message_to_output_items_with_tool_call", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def 
test_message_to_output_items_with_tool_call():\n \"\"\"\n If the ChatCompletionMessage contains one or more tool_calls, they should\n be reflected as separate `ResponseFunctionToolCall` items appended after\n the message item.\n \"\"\"\n tool_call = ChatCompletionMessageToolCall(\n id=\"tool1\",\n type=\"function\",\n function=Function(name=\"myfn\", arguments='{\"x\":1}'),", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_items_to_messages_with_string_user_content", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_items_to_messages_with_string_user_content():\n \"\"\"\n A simple string as the items argument should be converted into a user\n message param dict with the same content.\n \"\"\"\n result = Converter.items_to_messages(\"Ask me anything\")\n assert isinstance(result, list)\n assert len(result) == 1\n msg = result[0]\n assert msg[\"role\"] == \"user\"", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_items_to_messages_with_easy_input_message", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_items_to_messages_with_easy_input_message():\n \"\"\"\n Given an easy input message dict (just role/content), the converter should\n produce the appropriate ChatCompletionMessageParam with the same content.\n \"\"\"\n items: list[TResponseInputItem] = [\n {\n \"role\": \"user\",\n \"content\": \"How are you?\",\n }", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_items_to_messages_with_output_message_and_function_call", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_items_to_messages_with_output_message_and_function_call():\n \"\"\"\n Given a sequence of one ResponseOutputMessageParam followed by a\n ResponseFunctionToolCallParam, the converter should produce a single\n ChatCompletionAssistantMessageParam that includes both the assistant's\n textual content and a populated `tool_calls` reflecting the function call.\n \"\"\"\n # Construct output message param dict with two content parts.\n output_text: ResponseOutputText = ResponseOutputText(\n text=\"Part 1\",", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_convert_tool_choice_handles_standard_and_named_options", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_convert_tool_choice_handles_standard_and_named_options() -> None:\n \"\"\"\n The `Converter.convert_tool_choice` method should return NOT_GIVEN\n if no choice is provided, pass through values like \"auto\", \"required\",\n or \"none\" unchanged, and translate any other string into a function\n selection dict.\n \"\"\"\n assert Converter.convert_tool_choice(None).__class__.__name__ == \"NotGiven\"\n assert Converter.convert_tool_choice(\"auto\") == \"auto\"\n assert Converter.convert_tool_choice(\"required\") == \"required\"", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": 
"test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas() -> None:\n \"\"\"\n The `Converter.convert_response_format` method should return NOT_GIVEN\n when no output schema is provided or if the output schema indicates\n plain text. For structured output schemas, it should return a dict\n with type `json_schema` and include the generated JSON schema and\n strict flag from the provided `AgentOutputSchema`.\n \"\"\"\n # when output is plain text (schema None or output_type str), do not include response_format\n assert Converter.convert_response_format(None).__class__.__name__ == \"NotGiven\"", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_items_to_messages_with_function_output_item", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_items_to_messages_with_function_output_item():\n \"\"\"\n A function call output item should be converted into a tool role message\n dict with the appropriate tool_call_id and content.\n \"\"\"\n func_output_item: FunctionCallOutput = {\n \"type\": \"function_call_output\",\n \"call_id\": \"somecall\",\n \"output\": '{\"foo\": \"bar\"}',\n }", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_extract_all_and_text_content_for_strings_and_lists", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_extract_all_and_text_content_for_strings_and_lists():\n \"\"\"\n The converter provides helpers for extracting user-supplied message content\n either as a simple string or as a list of `input_text` dictionaries.\n When passed a bare string, both `extract_all_content` and\n `extract_text_content` should return the string unchanged.\n When passed a list of input dictionaries, `extract_all_content` should\n produce a list of `ChatCompletionContentPart` dicts, and `extract_text_content`\n should filter to only the textual parts.\n \"\"\"", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_items_to_messages_handles_system_and_developer_roles", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_items_to_messages_handles_system_and_developer_roles():\n \"\"\"\n Roles other than `user` (e.g. 
`system` and `developer`) need to be\n converted appropriately whether provided as simple dicts or as full\n `message` typed dicts.\n \"\"\"\n sys_items: list[TResponseInputItem] = [{\"role\": \"system\", \"content\": \"setup\"}]\n sys_msgs = Converter.items_to_messages(sys_items)\n assert len(sys_msgs) == 1\n assert sys_msgs[0][\"role\"] == \"system\"", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_maybe_input_message_allows_message_typed_dict", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_maybe_input_message_allows_message_typed_dict():\n \"\"\"\n The `Converter.maybe_input_message` should recognize a dict with\n \"type\": \"message\" and a supported role as an input message. Ensure\n that such dicts are passed through by `items_to_messages`.\n \"\"\"\n # Construct a dict with the proper required keys for a ResponseInputParam.Message\n message_dict: TResponseInputItem = {\n \"type\": \"message\",\n \"role\": \"user\",", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_tool_call_conversion", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_tool_call_conversion():\n \"\"\"\n Test that tool calls are converted correctly.\n \"\"\"\n function_call = ResponseFunctionToolCallParam(\n id=\"tool1\",\n call_id=\"abc\",\n name=\"math\",\n arguments=\"{}\",\n type=\"function_call\",", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_input_message_with_all_roles", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_input_message_with_all_roles(role: str):\n \"\"\"\n The `Converter.maybe_input_message` should recognize a dict with\n \"type\": \"message\" and a supported role as an input message. 
Ensure\n that such dicts are passed through by `items_to_messages`.\n \"\"\"\n # Construct a dict with the proper required keys for a ResponseInputParam.Message\n casted_role = cast(Literal[\"user\", \"system\", \"developer\"], role)\n message_dict: TResponseInputItem = {\n \"type\": \"message\",", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_item_reference_errors", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_item_reference_errors():\n \"\"\"\n Test that item references are converted correctly.\n \"\"\"\n with pytest.raises(UserError):\n Converter.items_to_messages(\n [\n {\n \"type\": \"item_reference\",\n \"id\": \"item1\",", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_unknown_object_errors", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_unknown_object_errors():\n \"\"\"\n Test that unknown objects are converted correctly.\n \"\"\"\n with pytest.raises(UserError, match=\"Unhandled item type or structure\"):\n # Purposely ignore the type error\n Converter.items_to_messages([TestObject()]) # type: ignore\ndef test_assistant_messages_in_history():\n \"\"\"\n Test that assistant messages are added to the history.", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_assistant_messages_in_history", - "kind": 2, - "importPath": "tests.test_openai_chatcompletions_converter", - "description": "tests.test_openai_chatcompletions_converter", - "peekOfCode": "def test_assistant_messages_in_history():\n \"\"\"\n Test that assistant messages are added to the history.\n \"\"\"\n messages = Converter.items_to_messages(\n [\n {\n \"role\": \"user\",\n \"content\": \"Hello\",\n },", - "detail": "tests.test_openai_chatcompletions_converter", - "documentation": {} - }, - { - "label": "test_convert_tool_choice_standard_values", - "kind": 2, - "importPath": "tests.test_openai_responses_converter", - "description": "tests.test_openai_responses_converter", - "peekOfCode": "def test_convert_tool_choice_standard_values():\n \"\"\"\n Make sure that the standard tool_choice values map to themselves or\n to \"auto\"/\"required\"/\"none\" as appropriate, and that special string\n values map to the appropriate dicts.\n \"\"\"\n assert Converter.convert_tool_choice(None) is NOT_GIVEN\n assert Converter.convert_tool_choice(\"auto\") == \"auto\"\n assert Converter.convert_tool_choice(\"required\") == \"required\"\n assert Converter.convert_tool_choice(\"none\") == \"none\"", - "detail": "tests.test_openai_responses_converter", - "documentation": {} - }, - { - "label": "test_get_response_format_plain_text_and_json_schema", - "kind": 2, - "importPath": "tests.test_openai_responses_converter", - "description": "tests.test_openai_responses_converter", - "peekOfCode": "def test_get_response_format_plain_text_and_json_schema():\n \"\"\"\n For plain text output (default, or output type of `str`), the converter\n should return NOT_GIVEN, indicating no special response format constraint.\n If an output schema is provided for a structured type, the converter\n should return a `format` dict with the schema and strictness. 
The exact\n JSON schema depends on the output type; we just assert that required\n keys are present and that we get back the original schema.\n \"\"\"\n # Default output (None) should be considered plain text.", - "detail": "tests.test_openai_responses_converter", - "documentation": {} - }, - { - "label": "test_convert_tools_basic_types_and_includes", - "kind": 2, - "importPath": "tests.test_openai_responses_converter", - "description": "tests.test_openai_responses_converter", - "peekOfCode": "def test_convert_tools_basic_types_and_includes():\n \"\"\"\n Construct a variety of tool types and make sure `convert_tools` returns\n a matching list of tool param dicts and the expected includes. Also\n check that only a single computer tool is allowed.\n \"\"\"\n # Simple function tool\n tool_fn = function_tool(lambda a: \"x\", name_override=\"fn\")\n # File search tool with include_search_results set\n file_tool = FileSearchTool(", - "detail": "tests.test_openai_responses_converter", - "documentation": {} - }, - { - "label": "test_convert_tools_includes_handoffs", - "kind": 2, - "importPath": "tests.test_openai_responses_converter", - "description": "tests.test_openai_responses_converter", - "peekOfCode": "def test_convert_tools_includes_handoffs():\n \"\"\"\n When handoff objects are included, `convert_tools` should append their\n tool param dicts after tools and include appropriate descriptions.\n \"\"\"\n agent = Agent(name=\"support\", handoff_description=\"Handles support\")\n handoff_obj = handoff(agent)\n converted = Converter.convert_tools(tools=[], handoffs=[handoff_obj])\n assert isinstance(converted.tools, list)\n assert len(converted.tools) == 1", - "detail": "tests.test_openai_responses_converter", - "documentation": {} - }, - { - "label": "Foo", - "kind": 6, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "class Foo(BaseModel):\n bar: str\ndef test_structured_output_pydantic():\n agent = Agent(name=\"test\", output_type=Foo)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Foo, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"Pydantic objects should not be wrapped\"\n for key, value in Foo.model_json_schema().items():", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "Bar", - "kind": 6, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "class Bar(TypedDict):\n bar: str\ndef test_structured_output_typed_dict():\n agent = Agent(name=\"test\", output_type=Bar)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Bar, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"TypedDicts should not be wrapped\"\n json_str = json.dumps(Bar(bar=\"baz\"))", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "CustomOutputSchema", - "kind": 6, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "class CustomOutputSchema(AgentOutputSchemaBase):\n def is_plain_text(self) -> bool:\n return False\n def name(self) -> str:\n return \"FooBarBaz\"\n def 
json_schema(self) -> dict[str, Any]:\n return _CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA\n def is_strict_json_schema(self) -> bool:\n return False\n def validate_json(self, json_str: str) -> Any:", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_plain_text_output", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_plain_text_output():\n agent = Agent(name=\"test\")\n output_schema = AgentRunner._get_output_schema(agent)\n assert not output_schema, \"Shouldn't have an output tool config without an output type\"\n agent = Agent(name=\"test\", output_type=str)\n assert not output_schema, \"Shouldn't have an output tool config with str output type\"\nclass Foo(BaseModel):\n bar: str\ndef test_structured_output_pydantic():\n agent = Agent(name=\"test\", output_type=Foo)", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_structured_output_pydantic", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_structured_output_pydantic():\n agent = Agent(name=\"test\", output_type=Foo)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Foo, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"Pydantic objects should not be wrapped\"\n for key, value in Foo.model_json_schema().items():\n assert output_schema.json_schema()[key] == value\n json_str = Foo(bar=\"baz\").model_dump_json()", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_structured_output_typed_dict", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_structured_output_typed_dict():\n agent = Agent(name=\"test\", output_type=Bar)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == Bar, \"Should have the correct output type\"\n assert not output_schema._is_wrapped, \"TypedDicts should not be wrapped\"\n json_str = json.dumps(Bar(bar=\"baz\"))\n validated = output_schema.validate_json(json_str)\n assert validated == Bar(bar=\"baz\")", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_structured_output_list", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_structured_output_list():\n agent = Agent(name=\"test\", output_type=list[str])\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, AgentOutputSchema)\n assert output_schema.output_type == list[str], \"Should have the correct output type\"\n assert output_schema._is_wrapped, \"Lists should be wrapped\"\n # This is testing implementation details, but it's useful to make sure this doesn't break\n json_str = json.dumps({_WRAPPER_DICT_KEY: [\"foo\", \"bar\"]})\n validated = output_schema.validate_json(json_str)", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_bad_json_raises_error", - "kind": 2, - 
"importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_bad_json_raises_error(mocker):\n agent = Agent(name=\"test\", output_type=Foo)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n with pytest.raises(ModelBehaviorError):\n output_schema.validate_json(\"not valid json\")\n agent = Agent(name=\"test\", output_type=list[str])\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n mock_validate_json = mocker.patch.object(_json, \"validate_json\")", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_plain_text_obj_doesnt_produce_schema", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_plain_text_obj_doesnt_produce_schema():\n output_wrapper = AgentOutputSchema(output_type=str)\n with pytest.raises(UserError):\n output_wrapper.json_schema()\ndef test_structured_output_is_strict():\n output_wrapper = AgentOutputSchema(output_type=Foo)\n assert output_wrapper.is_strict_json_schema()\n for key, value in Foo.model_json_schema().items():\n assert output_wrapper.json_schema()[key] == value\n assert (", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_structured_output_is_strict", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_structured_output_is_strict():\n output_wrapper = AgentOutputSchema(output_type=Foo)\n assert output_wrapper.is_strict_json_schema()\n for key, value in Foo.model_json_schema().items():\n assert output_wrapper.json_schema()[key] == value\n assert (\n \"additionalProperties\" in output_wrapper.json_schema()\n and not output_wrapper.json_schema()[\"additionalProperties\"]\n )\ndef test_setting_strict_false_works():", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_setting_strict_false_works", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_setting_strict_false_works():\n output_wrapper = AgentOutputSchema(output_type=Foo, strict_json_schema=False)\n assert not output_wrapper.is_strict_json_schema()\n assert output_wrapper.json_schema() == Foo.model_json_schema()\n assert output_wrapper.json_schema() == Foo.model_json_schema()\n_CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n },", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "test_custom_output_schema", - "kind": 2, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "def test_custom_output_schema():\n custom_output_schema = CustomOutputSchema()\n agent = Agent(name=\"test\", output_type=custom_output_schema)\n output_schema = AgentRunner._get_output_schema(agent)\n assert output_schema, \"Should have an output tool config with a structured output type\"\n assert isinstance(output_schema, CustomOutputSchema)\n assert output_schema.json_schema() == _CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA\n assert not output_schema.is_strict_json_schema()\n assert not output_schema.is_plain_text()\n json_str = json.dumps({\"foo\": \"bar\"})", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": 
"_CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA", - "kind": 5, - "importPath": "tests.test_output_tool", - "description": "tests.test_output_tool", - "peekOfCode": "_CUSTOM_OUTPUT_SCHEMA_JSON_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"foo\": {\"type\": \"string\"},\n },\n \"required\": [\"foo\"],\n}\nclass CustomOutputSchema(AgentOutputSchemaBase):\n def is_plain_text(self) -> bool:\n return False", - "detail": "tests.test_output_tool", - "documentation": {} - }, - { - "label": "Foo", - "kind": 6, - "importPath": "tests.test_pretty_print", - "description": "tests.test_pretty_print", - "peekOfCode": "class Foo(BaseModel):\n bar: str\n@pytest.mark.asyncio\nasync def test_pretty_run_result_structured_output():\n model = FakeModel()\n model.set_next_output(\n [\n get_text_message(\"Test\"),\n get_final_output_message(Foo(bar=\"Hi there\").model_dump_json()),\n ]", - "detail": "tests.test_pretty_print", - "documentation": {} - }, - { - "label": "create_content_delta", - "kind": 2, - "importPath": "tests.test_reasoning_content", - "description": "tests.test_reasoning_content", - "peekOfCode": "def create_content_delta(content: str) -> dict[str, Any]:\n \"\"\"Create a delta dictionary with regular content\"\"\"\n return {\"content\": content, \"role\": None, \"function_call\": None, \"tool_calls\": None}\ndef create_reasoning_delta(content: str) -> dict[str, Any]:\n \"\"\"Create a delta dictionary with reasoning content. The Only difference is reasoning_content\"\"\"\n return {\n \"content\": None,\n \"role\": None,\n \"function_call\": None,\n \"tool_calls\": None,", - "detail": "tests.test_reasoning_content", - "documentation": {} - }, - { - "label": "create_reasoning_delta", - "kind": 2, - "importPath": "tests.test_reasoning_content", - "description": "tests.test_reasoning_content", - "peekOfCode": "def create_reasoning_delta(content: str) -> dict[str, Any]:\n \"\"\"Create a delta dictionary with reasoning content. 
The Only difference is reasoning_content\"\"\"\n return {\n \"content\": None,\n \"role\": None,\n \"function_call\": None,\n \"tool_calls\": None,\n \"reasoning_content\": content,\n }\ndef create_chunk(delta: dict[str, Any], include_usage: bool = False) -> ChatCompletionChunk:", - "detail": "tests.test_reasoning_content", - "documentation": {} - }, - { - "label": "create_chunk", - "kind": 2, - "importPath": "tests.test_reasoning_content", - "description": "tests.test_reasoning_content", - "peekOfCode": "def create_chunk(delta: dict[str, Any], include_usage: bool = False) -> ChatCompletionChunk:\n \"\"\"Create a ChatCompletionChunk with the given delta\"\"\"\n # Create a ChoiceDelta object from the dictionary\n delta_obj = ChoiceDelta(\n content=delta.get(\"content\"),\n role=delta.get(\"role\"),\n function_call=delta.get(\"function_call\"),\n tool_calls=delta.get(\"tool_calls\"),\n )\n # Add reasoning_content attribute dynamically if present in the delta", - "detail": "tests.test_reasoning_content", - "documentation": {} - }, - { - "label": "get_text_input_item", - "kind": 2, - "importPath": "tests.test_responses", - "description": "tests.test_responses", - "peekOfCode": "def get_text_input_item(content: str) -> TResponseInputItem:\n return {\n \"content\": content,\n \"role\": \"user\",\n }\ndef get_text_message(content: str) -> ResponseOutputItem:\n return ResponseOutputMessage(\n id=\"1\",\n type=\"message\",\n role=\"assistant\",", - "detail": "tests.test_responses", - "documentation": {} - }, - { - "label": "get_text_message", - "kind": 2, - "importPath": "tests.test_responses", - "description": "tests.test_responses", - "peekOfCode": "def get_text_message(content: str) -> ResponseOutputItem:\n return ResponseOutputMessage(\n id=\"1\",\n type=\"message\",\n role=\"assistant\",\n content=[ResponseOutputText(text=content, type=\"output_text\", annotations=[])],\n status=\"completed\",\n )\ndef get_function_tool(\n name: str | None = None, return_value: str | None = None, hide_errors: bool = False", - "detail": "tests.test_responses", - "documentation": {} - }, - { - "label": "get_function_tool", - "kind": 2, - "importPath": "tests.test_responses", - "description": "tests.test_responses", - "peekOfCode": "def get_function_tool(\n name: str | None = None, return_value: str | None = None, hide_errors: bool = False\n) -> FunctionTool:\n def _foo() -> str:\n return return_value or \"result_ok\"\n return function_tool(\n _foo,\n name_override=name,\n failure_error_function=None if hide_errors else default_tool_error_function,\n )", - "detail": "tests.test_responses", - "documentation": {} - }, - { - "label": "get_function_tool_call", - "kind": 2, - "importPath": "tests.test_responses", - "description": "tests.test_responses", - "peekOfCode": "def get_function_tool_call(\n name: str, arguments: str | None = None, call_id: str | None = None\n) -> ResponseOutputItem:\n return ResponseFunctionToolCall(\n id=\"1\",\n call_id=call_id or \"2\",\n type=\"function_call\",\n name=name,\n arguments=arguments or \"\",\n )", - "detail": "tests.test_responses", - "documentation": {} - }, - { - "label": "get_handoff_tool_call", - "kind": 2, - "importPath": "tests.test_responses", - "description": "tests.test_responses", - "peekOfCode": "def get_handoff_tool_call(\n to_agent: Agent[Any], override_name: str | None = None, args: str | None = None\n) -> ResponseOutputItem:\n name = override_name or Handoff.default_tool_name(to_agent)\n return get_function_tool_call(name, args)\ndef 
[… remainder of the deleted hunk of the auto-generated Python code-index JSON ("label" / "kind" / "importPath" / "peekOfCode" entries) covering the tests.* modules: tests.test_responses, tests.test_responses_tracing, tests.test_result_cast, tests.test_run_config, tests.test_run_step_execution, tests.test_run_step_processing, tests.test_session_exceptions, tests.test_strict_schema, tests.test_tool_choice_reset, tests.test_tool_converter, tests.test_trace_processor, tests.test_tracing, tests.test_tracing_errors, tests.test_tracing_errors_streamed, tests.test_usage, tests.test_visualization, and tests.testing_processor …]
-]
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
deleted file mode 100644
index a75c1414f..000000000
--- a/.vscode/launch.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    // Use IntelliSense to learn about possible attributes.
-    // Hover to view descriptions of existing attributes.
-    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
-    "version": "0.2.0",
-    "configurations": [
-        {
-            "name": "Python Debugger: Python File",
-            "type": "debugpy",
-            "request": "launch",
-            "program": "${file}"
-        }
-    ]
-}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index 9b388533a..000000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "python.testing.pytestArgs": [
-        "tests"
-    ],
-    "python.testing.unittestEnabled": false,
-    "python.testing.pytestEnabled": true
-}
\ No newline at end of file