From eefa9bfc9e73034c5c82ac3c0683d0dd8657753d Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 9 Dec 2025 08:35:22 -0500 Subject: [PATCH 1/7] add support for toolset decorator in temporal --- docs/durable_execution/temporal.md | 10 +- docs/toolsets.md | 8 + .../pydantic_ai/agent/__init__.py | 8 +- .../durable_exec/temporal/_dynamic_toolset.py | 155 ++++++++++++ .../durable_exec/temporal/_toolset.py | 14 + .../pydantic_ai/toolsets/_dynamic.py | 4 +- tests/test_temporal.py | 239 ++++++++++++++++++ 7 files changed, 435 insertions(+), 3 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py diff --git a/docs/durable_execution/temporal.md b/docs/durable_execution/temporal.md index f52626caff..329c884b17 100644 --- a/docs/durable_execution/temporal.md +++ b/docs/durable_execution/temporal.md @@ -156,7 +156,15 @@ There are a few considerations specific to agents and toolsets when using Tempor To ensure that Temporal knows what code to run when an activity fails or is interrupted and then restarted, even if your code is changed in between, each activity needs to have a name that's stable and unique. -When `TemporalAgent` dynamically creates activities for the wrapped agent's model requests and toolsets (specifically those that implement their own tool listing and calling, i.e. [`FunctionToolset`][pydantic_ai.toolsets.FunctionToolset] and [`MCPServer`][pydantic_ai.mcp.MCPServer]), their names are derived from the agent's [`name`][pydantic_ai.agent.AbstractAgent.name] and the toolsets' [`id`s][pydantic_ai.toolsets.AbstractToolset.id]. These fields are normally optional, but are required to be set when using Temporal. They should not be changed once the durable agent has been deployed to production as this would break active workflows. +When `TemporalAgent` dynamically creates activities for the wrapped agent's model requests and toolsets (specifically those that implement their own tool listing and calling, i.e. [`FunctionToolset`][pydantic_ai.toolsets.FunctionToolset], [`MCPServer`][pydantic_ai.mcp.MCPServer], and dynamic toolsets created with [`@agent.toolset`][pydantic_ai.Agent.toolset]), their names are derived from the agent's [`name`][pydantic_ai.agent.AbstractAgent.name] and the toolsets' [`id`s][pydantic_ai.toolsets.AbstractToolset.id]. These fields are normally optional, but are required to be set when using Temporal. They should not be changed once the durable agent has been deployed to production as this would break active workflows. + +For dynamic toolsets created with the [`@agent.toolset`][pydantic_ai.Agent.toolset] decorator, the `id` parameter can be set explicitly or it will default to the function name: + +```python +@agent.toolset(id='my_dynamic_tools') +def my_toolset(ctx: RunContext[MyDeps]) -> FunctionToolset: + ... +``` Other than that, any agent and toolset will just work! diff --git a/docs/toolsets.md b/docs/toolsets.md index 1b041b3baa..3e8dea76bb 100644 --- a/docs/toolsets.md +++ b/docs/toolsets.md @@ -598,6 +598,14 @@ To register a dynamic toolset, you can pass a function that takes [`RunContext`] By default, the function will be called again ahead of each agent run step. If you are using the decorator, you can optionally provide a `per_run_step=False` argument to indicate that the toolset only needs to be built once for the entire run. 
+When using [Temporal durable execution](./durable_execution/temporal.md), the decorator also accepts an `id` parameter to uniquely identify the toolset. If not provided, the function name is used as the ID. + +```python +@agent.toolset(id='my_toolset') +def my_dynamic_toolset(ctx: RunContext[MyDeps]): + return FunctionToolset(...) +``` + ```python {title="dynamic_toolset.py", requires="function_toolset.py"} from dataclasses import dataclass from typing import Literal diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 19edb4a619..631d87c6af 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1242,6 +1242,7 @@ def toolset( /, *, per_run_step: bool = True, + id: str | None = None, ) -> Callable[[ToolsetFunc[AgentDepsT]], ToolsetFunc[AgentDepsT]]: ... def toolset( @@ -1250,6 +1251,7 @@ def toolset( /, *, per_run_step: bool = True, + id: str | None = None, ) -> Any: """Decorator to register a toolset function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its only argument. @@ -1271,10 +1273,14 @@ async def simple_toolset(ctx: RunContext[str]) -> AbstractToolset[str]: Args: func: The toolset function to register. per_run_step: Whether to re-evaluate the toolset for each run step. Defaults to True. + id: An optional unique ID for the dynamic toolset. Required for use with durable execution + environments like Temporal, where the ID identifies the toolset's activities within the workflow. + If not provided, defaults to the function name. """ def toolset_decorator(func_: ToolsetFunc[AgentDepsT]) -> ToolsetFunc[AgentDepsT]: - self._dynamic_toolsets.append(DynamicToolset(func_, per_run_step=per_run_step)) + toolset_id = id if id is not None else func_.__name__ + self._dynamic_toolsets.append(DynamicToolset(func_, per_run_step=per_run_step, _id=toolset_id)) return func_ return toolset_decorator if func is None else toolset_decorator(func) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py new file mode 100644 index 0000000000..7ff0b00f5c --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py @@ -0,0 +1,155 @@ +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import dataclass +from typing import Any, Literal + +from pydantic import ConfigDict, with_config +from temporalio import activity, workflow +from temporalio.workflow import ActivityConfig +from typing_extensions import Self + +from pydantic_ai import ToolsetTool +from pydantic_ai.tools import AgentDepsT, RunContext, ToolDefinition +from pydantic_ai.toolsets._dynamic import DynamicToolset +from pydantic_ai.toolsets.external import TOOL_SCHEMA_VALIDATOR + +from ._run_context import TemporalRunContext +from ._toolset import ( + CallToolParams, + CallToolResult, + TemporalWrapperToolset, +) + + +@dataclass +@with_config(ConfigDict(arbitrary_types_allowed=True)) +class _GetToolsParams: + serialized_run_context: Any + + +class TemporalDynamicToolset(TemporalWrapperToolset[AgentDepsT]): + """Temporal wrapper for DynamicToolset. + + This provides static activities (get_tools, call_tool) that are registered at worker start time, + while the actual toolset selection happens dynamically inside the activities where I/O is allowed. 
+ """ + + def __init__( + self, + toolset: DynamicToolset[AgentDepsT], + *, + activity_name_prefix: str, + activity_config: ActivityConfig, + tool_activity_config: dict[str, ActivityConfig | Literal[False]], + deps_type: type[AgentDepsT], + run_context_type: type[TemporalRunContext[AgentDepsT]] = TemporalRunContext[AgentDepsT], + ): + super().__init__(toolset) + self.activity_config = activity_config + self.tool_activity_config = tool_activity_config + self.run_context_type = run_context_type + + async def get_tools_activity(params: _GetToolsParams, deps: AgentDepsT) -> dict[str, ToolDefinition]: + """Activity that calls the dynamic function and returns tool definitions.""" + ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) + + async with self.wrapped: + tools = await self.wrapped.get_tools(ctx) + return {name: tool.tool_def for name, tool in tools.items()} + + get_tools_activity.__annotations__['deps'] = deps_type + + self.get_tools_activity = activity.defn(name=f'{activity_name_prefix}__dynamic_toolset__{self.id}__get_tools')( + get_tools_activity + ) + + async def call_tool_activity(params: CallToolParams, deps: AgentDepsT) -> CallToolResult: + """Activity that instantiates the dynamic toolset and calls the tool.""" + ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) + + async with self.wrapped: + tools = await self.wrapped.get_tools(ctx) + tool = tools.get(params.name) + if tool is None: + from pydantic_ai.exceptions import UserError + + raise UserError( + f'Tool {params.name!r} not found in dynamic toolset {self.id!r}. ' + 'The dynamic toolset function may have returned a different toolset than expected.' + ) + + args_dict = tool.args_validator.validate_python(params.tool_args) + return await self._wrap_call_tool_result(self.wrapped.call_tool(params.name, args_dict, ctx, tool)) + + call_tool_activity.__annotations__['deps'] = deps_type + + self.call_tool_activity = activity.defn(name=f'{activity_name_prefix}__dynamic_toolset__{self.id}__call_tool')( + call_tool_activity + ) + + @property + def temporal_activities(self) -> list[Callable[..., Any]]: + return [self.get_tools_activity, self.call_tool_activity] + + async def __aenter__(self) -> Self: + return self + + async def __aexit__(self, *args: Any) -> bool | None: + return None + + async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: + if not workflow.in_workflow(): + return await super().get_tools(ctx) + + serialized_run_context = self.run_context_type.serialize_run_context(ctx) + tool_defs = await workflow.execute_activity( + activity=self.get_tools_activity, + args=[ + _GetToolsParams(serialized_run_context=serialized_run_context), + ctx.deps, + ], + **self.activity_config, + ) + return {name: self._tool_for_tool_def(tool_def) for name, tool_def in tool_defs.items()} + + async def call_tool( + self, + name: str, + tool_args: dict[str, Any], + ctx: RunContext[AgentDepsT], + tool: ToolsetTool[AgentDepsT], + ) -> Any: + if not workflow.in_workflow(): + return await super().call_tool(name, tool_args, ctx, tool) + + tool_activity_config = self.tool_activity_config.get(name) + if tool_activity_config is False: + return await super().call_tool(name, tool_args, ctx, tool) + + merged_config = self.activity_config | (tool_activity_config or {}) + serialized_run_context = self.run_context_type.serialize_run_context(ctx) + return self._unwrap_call_tool_result( + await workflow.execute_activity( + 
activity=self.call_tool_activity, + args=[ + CallToolParams( + name=name, + tool_args=tool_args, + serialized_run_context=serialized_run_context, + tool_def=tool.tool_def, + ), + ctx.deps, + ], + **merged_config, + ) + ) + + def _tool_for_tool_def(self, tool_def: ToolDefinition) -> ToolsetTool[AgentDepsT]: + """Create a ToolsetTool from a ToolDefinition for use outside activities.""" + return ToolsetTool( + toolset=self, + tool_def=tool_def, + max_retries=1, + args_validator=TOOL_SCHEMA_VALIDATOR, + ) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py index 850f001a46..c39fdc177b 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py @@ -127,6 +127,20 @@ def temporalize_toolset( run_context_type=run_context_type, ) + from pydantic_ai.toolsets._dynamic import DynamicToolset + + if isinstance(toolset, DynamicToolset): + from ._dynamic_toolset import TemporalDynamicToolset + + return TemporalDynamicToolset( + toolset, + activity_name_prefix=activity_name_prefix, + activity_config=activity_config, + tool_activity_config=tool_activity_config, + deps_type=deps_type, + run_context_type=run_context_type, + ) + try: from pydantic_ai.mcp import MCPServer diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py index 9bb622ce23..b7046ccfe9 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py @@ -27,13 +27,15 @@ class DynamicToolset(AbstractToolset[AgentDepsT]): toolset_func: ToolsetFunc[AgentDepsT] per_run_step: bool = True + _id: str | None = None + """An optional unique ID for the toolset. 
Required for use with durable execution like Temporal.""" _toolset: AbstractToolset[AgentDepsT] | None = None _run_step: int | None = None @property def id(self) -> str | None: - return None # pragma: no cover + return self._id async def __aenter__(self) -> Self: return self diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 98039d9078..94f3f444ca 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -64,6 +64,7 @@ from temporalio.workflow import ActivityConfig from pydantic_ai.durable_exec.temporal import AgentPlugin, LogfirePlugin, PydanticAIPlugin, TemporalAgent + from pydantic_ai.durable_exec.temporal._dynamic_toolset import TemporalDynamicToolset from pydantic_ai.durable_exec.temporal._function_toolset import TemporalFunctionToolset from pydantic_ai.durable_exec.temporal._mcp_server import TemporalMCPServer from pydantic_ai.durable_exec.temporal._model import TemporalModel @@ -1104,6 +1105,244 @@ async def test_toolset_without_id(): TemporalAgent(Agent(model=model, name='test_agent', toolsets=[FunctionToolset()])) +# --- DynamicToolset / @agent.toolset tests --- + + +def dynamic_toolset_model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + """Model logic for testing dynamic toolsets.""" + # If no tool calls yet, call the tool + if len(messages) == 1: + return ModelResponse(parts=[ToolCallPart(tool_name='get_dynamic_data', args={'key': 'test'})]) + # After tool call, return final result + return ModelResponse(parts=[TextPart(content='Dynamic result received')]) + + +dynamic_toolset_function_model = FunctionModel(dynamic_toolset_model_logic) + +dynamic_toolset_agent = Agent(dynamic_toolset_function_model, name='dynamic_toolset_agent') + + +@dynamic_toolset_agent.toolset +def my_dynamic_toolset(ctx: RunContext[None]) -> FunctionToolset[None]: + toolset = FunctionToolset[None](id='inner_dynamic') + + @toolset.tool + def get_dynamic_data(key: str) -> str: + return f'dynamic_value_for_{key}' + + return toolset + + +dynamic_toolset_temporal_agent = TemporalAgent( + dynamic_toolset_agent, + activity_config=BASE_ACTIVITY_CONFIG, +) + + +@workflow.defn +class DynamicToolsetAgentWorkflow: + @workflow.run + async def run(self, prompt: str) -> str: + result = await dynamic_toolset_temporal_agent.run(prompt) + return result.output + + +async def test_dynamic_toolset_temporal_agent_structure(): + """Test that DynamicToolset is correctly wrapped by TemporalDynamicToolset.""" + toolsets = dynamic_toolset_temporal_agent.toolsets + + # Should have the dynamic toolset wrapped + assert any(isinstance(t, TemporalDynamicToolset) for t in toolsets) + + # Find the TemporalDynamicToolset + for toolset in toolsets: + if isinstance(toolset, TemporalDynamicToolset): + # Check it has the correct id (defaults to function name) + assert toolset.id == 'my_dynamic_toolset' + # Check it has both get_tools and call_tool activities + activities = toolset.temporal_activities + assert len(activities) == 2 + activity_names: list[str] = [ + name + for a in activities + if (name := ActivityDefinition.must_from_callable(a).name) is not None # pyright: ignore[reportUnknownMemberType] + ] + assert any('get_tools' in name for name in activity_names) + assert any('call_tool' in name for name in activity_names) + break + else: + pytest.fail('TemporalDynamicToolset not found in toolsets') + + +async def test_dynamic_toolset_in_workflow(client: Client): + """Test that @agent.toolset works correctly in a Temporal workflow.""" + async with Worker( + client, + task_queue=TASK_QUEUE, 
+ workflows=[DynamicToolsetAgentWorkflow], + plugins=[AgentPlugin(dynamic_toolset_temporal_agent)], + ): + output = await client.execute_workflow( + DynamicToolsetAgentWorkflow.run, + args=['Call the dynamic tool with test key'], + id='test_dynamic_toolset_workflow', + task_queue=TASK_QUEUE, + ) + assert output == 'Dynamic result received' + + +# Test with explicit id parameter +dynamic_toolset_agent_with_id = Agent(dynamic_toolset_function_model, name='dynamic_toolset_agent_with_id') + + +@dynamic_toolset_agent_with_id.toolset(id='custom_toolset_id') +def my_custom_id_toolset(ctx: RunContext[None]) -> FunctionToolset[None]: + toolset = FunctionToolset[None](id='inner') + + @toolset.tool + def get_custom_data(key: str) -> str: + return f'custom_value_for_{key}' + + return toolset + + +dynamic_toolset_with_id_temporal_agent = TemporalAgent( + dynamic_toolset_agent_with_id, + activity_config=BASE_ACTIVITY_CONFIG, +) + + +async def test_dynamic_toolset_with_custom_id(): + """Test that explicit id parameter is respected.""" + toolsets = dynamic_toolset_with_id_temporal_agent.toolsets + + for toolset in toolsets: + if isinstance(toolset, TemporalDynamicToolset): + assert toolset.id == 'custom_toolset_id' + break + else: + pytest.fail('TemporalDynamicToolset not found in toolsets') + + +async def test_dynamic_toolset_get_tools_outside_workflow(): + """Test that get_tools works when called outside a workflow (delegates to wrapped toolset).""" + ctx = RunContext( + deps=None, + model=TestModel(), + usage=RunUsage(), + run_id='test-run', + ) + + for toolset in dynamic_toolset_temporal_agent.toolsets: + if isinstance(toolset, TemporalDynamicToolset): + async with toolset: + tools = await toolset.get_tools(ctx) + assert 'get_dynamic_data' in tools + break + else: + pytest.fail('TemporalDynamicToolset not found') + + +async def test_dynamic_toolset_call_tool_outside_workflow(): + """Test that call_tool works when called outside a workflow (delegates to wrapped toolset).""" + ctx = RunContext( + deps=None, + model=TestModel(), + usage=RunUsage(), + run_id='test-run', + ) + + for toolset in dynamic_toolset_temporal_agent.toolsets: + if isinstance(toolset, TemporalDynamicToolset): + async with toolset: + tools = await toolset.get_tools(ctx) + tool = tools['get_dynamic_data'] + result = await toolset.call_tool('get_dynamic_data', {'key': 'mykey'}, ctx, tool) + assert result == 'dynamic_value_for_mykey' + break + else: + pytest.fail('TemporalDynamicToolset not found') + + +async def test_dynamic_toolset_tool_not_found_in_activity(): + """Test that calling a non-existent tool in the activity raises UserError.""" + from pydantic_ai.durable_exec.temporal._toolset import CallToolParams + + ctx = RunContext( + deps=None, + model=TestModel(), + usage=RunUsage(), + run_id='test-run', + ) + + for toolset in dynamic_toolset_temporal_agent.toolsets: + if isinstance(toolset, TemporalDynamicToolset): + # Serialize the run context as the activity would receive it + serialized_ctx = TemporalRunContext.serialize_run_context(ctx) + + # Create params with a non-existent tool name + params = CallToolParams( + name='nonexistent_tool', + tool_args={'key': 'test'}, + serialized_run_context=serialized_ctx, + tool_def=None, + ) + + # Call the activity directly - this should raise UserError + with pytest.raises(UserError, match="Tool 'nonexistent_tool' not found in dynamic toolset"): + await toolset.call_tool_activity(params, None) + break + else: + pytest.fail('TemporalDynamicToolset not found') + + +# Create a dynamic toolset 
with activity disabled for testing +dynamic_toolset_agent_disabled = Agent(dynamic_toolset_function_model, name='dynamic_toolset_agent_disabled') + + +@dynamic_toolset_agent_disabled.toolset +def my_dynamic_toolset_disabled(ctx: RunContext[None]) -> FunctionToolset[None]: + toolset = FunctionToolset[None](id='inner_dynamic_disabled') + + @toolset.tool + def get_dynamic_data(key: str) -> str: + return f'disabled_dynamic_value_for_{key}' + + return toolset + + +dynamic_toolset_activity_disabled_temporal_agent = TemporalAgent( + dynamic_toolset_agent_disabled, + activity_config=BASE_ACTIVITY_CONFIG, + tool_activity_config={'my_dynamic_toolset_disabled': {'get_dynamic_data': False}}, +) + + +@workflow.defn +class DynamicToolsetActivityDisabledWorkflow: + @workflow.run + async def run(self, prompt: str) -> str: + result = await dynamic_toolset_activity_disabled_temporal_agent.run(prompt) + return result.output + + +async def test_dynamic_toolset_activity_disabled(client: Client): + """Test that call_tool with activity disabled runs tool directly in workflow.""" + async with Worker( + client, + task_queue=TASK_QUEUE, + workflows=[DynamicToolsetActivityDisabledWorkflow], + plugins=[AgentPlugin(dynamic_toolset_activity_disabled_temporal_agent)], + ): + output = await client.execute_workflow( + DynamicToolsetActivityDisabledWorkflow.run, + args=['Call the dynamic tool with test key'], + id='test_dynamic_toolset_activity_disabled_wf', + task_queue=TASK_QUEUE, + ) + assert output == 'Dynamic result received' + + async def test_temporal_agent(): assert isinstance(complex_temporal_agent.model, TemporalModel) assert complex_temporal_agent.model.wrapped == complex_agent.model From cb1e005ec6df8cec5a92e4acf084816e9261b79f Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Tue, 9 Dec 2025 13:11:24 -0500 Subject: [PATCH 2/7] fix tests --- docs/durable_execution/temporal.md | 2 +- docs/toolsets.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/durable_execution/temporal.md b/docs/durable_execution/temporal.md index 329c884b17..f9aeec1c88 100644 --- a/docs/durable_execution/temporal.md +++ b/docs/durable_execution/temporal.md @@ -160,7 +160,7 @@ When `TemporalAgent` dynamically creates activities for the wrapped agent's mode For dynamic toolsets created with the [`@agent.toolset`][pydantic_ai.Agent.toolset] decorator, the `id` parameter can be set explicitly or it will default to the function name: -```python +```python {test="skip"} @agent.toolset(id='my_dynamic_tools') def my_toolset(ctx: RunContext[MyDeps]) -> FunctionToolset: ... diff --git a/docs/toolsets.md b/docs/toolsets.md index 3e8dea76bb..fc327cdc69 100644 --- a/docs/toolsets.md +++ b/docs/toolsets.md @@ -600,7 +600,7 @@ By default, the function will be called again ahead of each agent run step. If y When using [Temporal durable execution](./durable_execution/temporal.md), the decorator also accepts an `id` parameter to uniquely identify the toolset. If not provided, the function name is used as the ID. -```python +```python {test="skip"} @agent.toolset(id='my_toolset') def my_dynamic_toolset(ctx: RunContext[MyDeps]): return FunctionToolset(...) 
From 548c2d57681b2dcd4ce3f3db1289de6917452153 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 10 Dec 2025 10:24:28 -0500 Subject: [PATCH 3/7] small review changes --- docs/durable_execution/temporal.md | 4 +++- docs/toolsets.md | 6 ------ pydantic_ai_slim/pydantic_ai/agent/__init__.py | 4 ++-- .../pydantic_ai/durable_exec/temporal/_toolset.py | 3 +-- 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/docs/durable_execution/temporal.md b/docs/durable_execution/temporal.md index f9aeec1c88..340dfbbfca 100644 --- a/docs/durable_execution/temporal.md +++ b/docs/durable_execution/temporal.md @@ -156,7 +156,7 @@ There are a few considerations specific to agents and toolsets when using Tempor To ensure that Temporal knows what code to run when an activity fails or is interrupted and then restarted, even if your code is changed in between, each activity needs to have a name that's stable and unique. -When `TemporalAgent` dynamically creates activities for the wrapped agent's model requests and toolsets (specifically those that implement their own tool listing and calling, i.e. [`FunctionToolset`][pydantic_ai.toolsets.FunctionToolset], [`MCPServer`][pydantic_ai.mcp.MCPServer], and dynamic toolsets created with [`@agent.toolset`][pydantic_ai.Agent.toolset]), their names are derived from the agent's [`name`][pydantic_ai.agent.AbstractAgent.name] and the toolsets' [`id`s][pydantic_ai.toolsets.AbstractToolset.id]. These fields are normally optional, but are required to be set when using Temporal. They should not be changed once the durable agent has been deployed to production as this would break active workflows. +When `TemporalAgent` dynamically creates activities for the wrapped agent's model requests and toolsets (specifically those that implement their own tool listing and calling, i.e. [`FunctionToolset`][pydantic_ai.toolsets.FunctionToolset] and [`MCPServer`][pydantic_ai.mcp.MCPServer]), their names are derived from the agent's [`name`][pydantic_ai.agent.AbstractAgent.name] and the toolsets' [`id`s][pydantic_ai.toolsets.AbstractToolset.id]. These fields are normally optional, but are required to be set when using Temporal. They should not be changed once the durable agent has been deployed to production as this would break active workflows. For dynamic toolsets created with the [`@agent.toolset`][pydantic_ai.Agent.toolset] decorator, the `id` parameter can be set explicitly or it will default to the function name: @@ -166,6 +166,8 @@ def my_toolset(ctx: RunContext[MyDeps]) -> FunctionToolset: ... ``` +Note that with Temporal, `per_run_step=False` is not respected, as the toolset always needs to be created on-the-fly in the activity. + Other than that, any agent and toolset will just work! ### Instructions Functions, Output Functions, and History Processors diff --git a/docs/toolsets.md b/docs/toolsets.md index fc327cdc69..6d87ce7840 100644 --- a/docs/toolsets.md +++ b/docs/toolsets.md @@ -600,12 +600,6 @@ By default, the function will be called again ahead of each agent run step. If y When using [Temporal durable execution](./durable_execution/temporal.md), the decorator also accepts an `id` parameter to uniquely identify the toolset. If not provided, the function name is used as the ID. -```python {test="skip"} -@agent.toolset(id='my_toolset') -def my_dynamic_toolset(ctx: RunContext[MyDeps]): - return FunctionToolset(...) 
-``` - ```python {title="dynamic_toolset.py", requires="function_toolset.py"} from dataclasses import dataclass from typing import Literal diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 631d87c6af..28b976dc8b 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1280,7 +1280,7 @@ async def simple_toolset(ctx: RunContext[str]) -> AbstractToolset[str]: def toolset_decorator(func_: ToolsetFunc[AgentDepsT]) -> ToolsetFunc[AgentDepsT]: toolset_id = id if id is not None else func_.__name__ - self._dynamic_toolsets.append(DynamicToolset(func_, per_run_step=per_run_step, _id=toolset_id)) + self._dynamic_toolsets.append(DynamicToolset(func_, per_run_step=per_run_step, id=toolset_id)) return func_ return toolset_decorator if func is None else toolset_decorator(func) @@ -1384,7 +1384,7 @@ def _get_toolset( # Copy the dynamic toolsets to ensure each run has its own instances def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractToolset[AgentDepsT]: if isinstance(toolset, DynamicToolset): - return dataclasses.replace(toolset) + return toolset.copy() else: return toolset diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py index c39fdc177b..93d1de6732 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py @@ -12,6 +12,7 @@ from pydantic_ai import AbstractToolset, FunctionToolset, WrapperToolset from pydantic_ai.exceptions import ApprovalRequired, CallDeferred, ModelRetry from pydantic_ai.tools import AgentDepsT, ToolDefinition +from pydantic_ai.toolsets._dynamic import DynamicToolset from ._run_context import TemporalRunContext @@ -127,8 +128,6 @@ def temporalize_toolset( run_context_type=run_context_type, ) - from pydantic_ai.toolsets._dynamic import DynamicToolset - if isinstance(toolset, DynamicToolset): from ._dynamic_toolset import TemporalDynamicToolset From e585bfcbf83b84982577a0f395bd7c3323ec3d06 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 10 Dec 2025 17:25:54 -0500 Subject: [PATCH 4/7] refactor: address review feedback for DynamicToolset Temporal support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Core changes: - DynamicToolset: convert to plain class with custom __init__, add copy() - TemporalDynamicToolset: use _call_tool_in_activity shared method - TemporalFunctionToolset: use _call_tool_in_activity shared method - temporalize_toolset: move DynamicToolset import to top Test changes: - Add test_fastmcp_dynamic_toolset_in_workflow for MCP lifecycle in DynamicToolset - Add test_dynamic_toolset_id and test_agent_toolset_decorator_id - Update test_visit_and_replace for DynamicToolset plain class 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.5 --- .../durable_exec/temporal/_dynamic_toolset.py | 43 +- .../temporal/_function_toolset.py | 5 +- .../durable_exec/temporal/_toolset.py | 19 +- .../pydantic_ai/toolsets/_dynamic.py | 49 +- ...t_fastmcp_dynamic_toolset_in_workflow.yaml | 1187 +++++++++++++++++ tests/test_temporal.py | 211 +-- tests/test_toolsets.py | 84 +- 7 files changed, 1386 insertions(+), 212 deletions(-) create mode 100644 tests/cassettes/test_temporal/test_fastmcp_dynamic_toolset_in_workflow.yaml diff --git 
a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py index 7ff0b00f5c..0a4fca0fc2 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py @@ -10,6 +10,7 @@ from typing_extensions import Self from pydantic_ai import ToolsetTool +from pydantic_ai.exceptions import UserError from pydantic_ai.tools import AgentDepsT, RunContext, ToolDefinition from pydantic_ai.toolsets._dynamic import DynamicToolset from pydantic_ai.toolsets.external import TOOL_SCHEMA_VALIDATOR @@ -28,6 +29,14 @@ class _GetToolsParams: serialized_run_context: Any +@dataclass +class _ToolInfo: + """Serializable tool information returned from get_tools_activity.""" + + tool_def: ToolDefinition + max_retries: int + + class TemporalDynamicToolset(TemporalWrapperToolset[AgentDepsT]): """Temporal wrapper for DynamicToolset. @@ -50,13 +59,16 @@ def __init__( self.tool_activity_config = tool_activity_config self.run_context_type = run_context_type - async def get_tools_activity(params: _GetToolsParams, deps: AgentDepsT) -> dict[str, ToolDefinition]: + async def get_tools_activity(params: _GetToolsParams, deps: AgentDepsT) -> dict[str, _ToolInfo]: """Activity that calls the dynamic function and returns tool definitions.""" ctx = self.run_context_type.deserialize_run_context(params.serialized_run_context, deps=deps) async with self.wrapped: tools = await self.wrapped.get_tools(ctx) - return {name: tool.tool_def for name, tool in tools.items()} + return { + name: _ToolInfo(tool_def=tool.tool_def, max_retries=tool.max_retries) + for name, tool in tools.items() + } get_tools_activity.__annotations__['deps'] = deps_type @@ -71,16 +83,13 @@ async def call_tool_activity(params: CallToolParams, deps: AgentDepsT) -> CallTo async with self.wrapped: tools = await self.wrapped.get_tools(ctx) tool = tools.get(params.name) - if tool is None: - from pydantic_ai.exceptions import UserError - + if tool is None: # pragma: no cover raise UserError( f'Tool {params.name!r} not found in dynamic toolset {self.id!r}. ' 'The dynamic toolset function may have returned a different toolset than expected.' 
) - args_dict = tool.args_validator.validate_python(params.tool_args) - return await self._wrap_call_tool_result(self.wrapped.call_tool(params.name, args_dict, ctx, tool)) + return await self._call_tool_in_activity(params.name, params.tool_args, ctx, tool) call_tool_activity.__annotations__['deps'] = deps_type @@ -93,9 +102,13 @@ def temporal_activities(self) -> list[Callable[..., Any]]: return [self.get_tools_activity, self.call_tool_activity] async def __aenter__(self) -> Self: + if not workflow.in_workflow(): + await self.wrapped.__aenter__() return self async def __aexit__(self, *args: Any) -> bool | None: + if not workflow.in_workflow(): + return await self.wrapped.__aexit__(*args) return None async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: @@ -103,7 +116,7 @@ async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[ return await super().get_tools(ctx) serialized_run_context = self.run_context_type.serialize_run_context(ctx) - tool_defs = await workflow.execute_activity( + tool_infos = await workflow.execute_activity( activity=self.get_tools_activity, args=[ _GetToolsParams(serialized_run_context=serialized_run_context), @@ -111,7 +124,7 @@ async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[ ], **self.activity_config, ) - return {name: self._tool_for_tool_def(tool_def) for name, tool_def in tool_defs.items()} + return {name: self._tool_for_tool_info(tool_info) for name, tool_info in tool_infos.items()} async def call_tool( self, @@ -145,11 +158,15 @@ async def call_tool( ) ) - def _tool_for_tool_def(self, tool_def: ToolDefinition) -> ToolsetTool[AgentDepsT]: - """Create a ToolsetTool from a ToolDefinition for use outside activities.""" + def _tool_for_tool_info(self, tool_info: _ToolInfo) -> ToolsetTool[AgentDepsT]: + """Create a ToolsetTool from a _ToolInfo for use outside activities. + + We use `TOOL_SCHEMA_VALIDATOR` here which just parses JSON without additional validation, + because the actual args validation happens inside `call_tool_activity`. + """ return ToolsetTool( toolset=self, - tool_def=tool_def, - max_retries=1, + tool_def=tool_info.tool_def, + max_retries=tool_info.max_retries, args_validator=TOOL_SCHEMA_VALIDATOR, ) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py index 05bc3f5f2c..8f9a2cd85b 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_function_toolset.py @@ -46,10 +46,7 @@ async def call_tool_activity(params: CallToolParams, deps: AgentDepsT) -> CallTo 'Removing or renaming tools during an agent run is not supported with Temporal.' ) from e - # The tool args will already have been validated into their proper types in the `ToolManager`, - # but `execute_activity` would have turned them into simple Python types again, so we need to re-validate them. 
- args_dict = tool.args_validator.validate_python(params.tool_args) - return await self._wrap_call_tool_result(self.wrapped.call_tool(name, args_dict, ctx, tool)) + return await self._call_tool_in_activity(name, params.tool_args, ctx, tool) # Set type hint explicitly so that Temporal can take care of serialization and deserialization call_tool_activity.__annotations__['deps'] = deps_type diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py index 93d1de6732..c8c8535e5e 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_toolset.py @@ -9,9 +9,9 @@ from temporalio.workflow import ActivityConfig from typing_extensions import assert_never -from pydantic_ai import AbstractToolset, FunctionToolset, WrapperToolset +from pydantic_ai import AbstractToolset, FunctionToolset, ToolsetTool, WrapperToolset from pydantic_ai.exceptions import ApprovalRequired, CallDeferred, ModelRetry -from pydantic_ai.tools import AgentDepsT, ToolDefinition +from pydantic_ai.tools import AgentDepsT, RunContext, ToolDefinition from pydantic_ai.toolsets._dynamic import DynamicToolset from ._run_context import TemporalRunContext @@ -97,6 +97,21 @@ def _unwrap_call_tool_result(self, result: CallToolResult) -> Any: else: assert_never(result) + async def _call_tool_in_activity( + self, + name: str, + tool_args: dict[str, Any], + ctx: RunContext[AgentDepsT], + tool: ToolsetTool[AgentDepsT], + ) -> CallToolResult: + """Call a tool inside an activity, re-validating args that were deserialized. + + The tool args will already have been validated into their proper types in the `ToolManager`, + but `execute_activity` would have turned them into simple Python types again, so we need to re-validate them. + """ + args_dict = tool.args_validator.validate_python(tool_args) + return await self._wrap_call_tool_result(self.wrapped.call_tool(name, args_dict, ctx, tool)) + def temporalize_toolset( toolset: AbstractToolset[AgentDepsT], diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py index b7046ccfe9..2679915d15 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py @@ -2,7 +2,6 @@ import inspect from collections.abc import Awaitable, Callable -from dataclasses import dataclass, replace from typing import Any, TypeAlias from typing_extensions import Self @@ -17,26 +16,53 @@ """A sync/async function which takes a run context and returns a toolset.""" -@dataclass class DynamicToolset(AbstractToolset[AgentDepsT]): """A toolset that dynamically builds a toolset using a function that takes the run context. It should only be used during a single agent run as it stores the generated toolset. - To use it multiple times, copy it using `dataclasses.replace`. + To use it multiple times, use the `copy()` method. """ toolset_func: ToolsetFunc[AgentDepsT] - per_run_step: bool = True - _id: str | None = None - """An optional unique ID for the toolset. Required for use with durable execution like Temporal.""" - - _toolset: AbstractToolset[AgentDepsT] | None = None - _run_step: int | None = None + per_run_step: bool + _id: str | None + _toolset: AbstractToolset[AgentDepsT] | None + _run_step: int | None + + def __init__( + self, + toolset_func: ToolsetFunc[AgentDepsT], + *, + per_run_step: bool = True, + id: str | None = None, + ): + """Build a new dynamic toolset. 
+ + Args: + toolset_func: A function that takes the run context and returns a toolset or None. + per_run_step: Whether to re-evaluate the toolset for each run step. Defaults to True. + id: An optional unique ID for the toolset. A toolset needs to have an ID in order to be used + in a durable execution environment like Temporal, in which case the ID will be used to + identify the toolset's activities within the workflow. + """ + self.toolset_func = toolset_func + self.per_run_step = per_run_step + self._id = id + self._toolset = None + self._run_step = None @property def id(self) -> str | None: return self._id + def copy(self) -> DynamicToolset[AgentDepsT]: + """Create a copy of this toolset for use in a new agent run.""" + return DynamicToolset( + self.toolset_func, + per_run_step=self.per_run_step, + id=self._id, + ) + async def __aenter__(self) -> Self: return self @@ -86,4 +112,7 @@ def visit_and_replace( if self._toolset is None: return super().visit_and_replace(visitor) else: - return replace(self, _toolset=self._toolset.visit_and_replace(visitor)) + new_copy = self.copy() + new_copy._toolset = self._toolset.visit_and_replace(visitor) + new_copy._run_step = self._run_step + return new_copy diff --git a/tests/cassettes/test_temporal/test_fastmcp_dynamic_toolset_in_workflow.yaml b/tests/cassettes/test_temporal/test_fastmcp_dynamic_toolset_in_workflow.yaml new file mode 100644 index 0000000000..6096172400 --- /dev/null +++ b/tests/cassettes/test_temporal/test_fastmcp_dynamic_toolset_in_workflow.yaml @@ -0,0 +1,1187 @@ +interactions: +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '152' + content-type: + - application/json + host: + - mcp.deepwiki.com + method: POST + parsed_body: + id: 0 + jsonrpc: '2.0' + method: initialize + params: + capabilities: {} + clientInfo: + name: mcp + version: 0.1.0 + protocolVersion: '2025-06-18' + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: message + data: {"jsonrpc":"2.0","id":0,"result":{"protocolVersion":"2025-03-26","capabilities":{"tools":{"listChanged":true}},"serverInfo":{"name":"DeepWiki","version":"0.0.1"}}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - cdedaeddac989e6130483fada3a2be512ac7ebf53552449423f906d8b5f282e8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=lKi9XzYtX66W%2B9XfhoDyMxmKe8bQR16G%2F3hUc3LZx14AmbTBS6dNtTQDK5bDdco1JexrO%2FdFatKVj7Eays5k%2FggAxkS98QMxpe8Wivr%2F3Y7kXwtc1K6z7GFXu96S04eHvOkKVw%3D%3D"}]}' + transfer-encoding: + - chunked + vary: + - accept-encoding + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '54' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - 
cdedaeddac989e6130483fada3a2be512ac7ebf53552449423f906d8b5f282e8 + method: POST + parsed_body: + jsonrpc: '2.0' + method: notifications/initialized + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '' + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '0' + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=Dr%2BCTPCKwDsOm7SKZJX829UAswQJM7TGM5a9VDRmhVd%2FN2QIcY7xQNDYylIuQUav2MYv1dWv3qxceJMaywW7kNq93N1OO1xzoctsMCn704yHJLXUhduednBCrVkuQTJMPCTNdw%3D%3D"}]}' + vary: + - accept-encoding + status: + code: 202 + message: Accepted +- request: + body: '' + headers: + accept: + - application/json, text/event-stream, text/event-stream + accept-encoding: + - gzip, deflate + cache-control: + - no-store + connection: + - keep-alive + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - cdedaeddac989e6130483fada3a2be512ac7ebf53552449423f906d8b5f282e8 + method: GET + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Method not allowed"},"id":null}' + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '82' + content-type: + - text/plain;charset=UTF-8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=7VNPpbENnzJhqksXXBCOI8%2FGxZFJRywD5pjWO7HBa6OPW0VGWqsa4HWlgc0yiiZFcoIrjRkIE1HDAlShfth7EzoROXxiJv9o4QtaQewqpKowR3DexVuyDBfvrjuU4FAcZBpy7g%3D%3D"}]}' + vary: + - accept-encoding + status: + code: 405 + message: Method Not Allowed +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '46' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - cdedaeddac989e6130483fada3a2be512ac7ebf53552449423f906d8b5f282e8 + method: POST + parsed_body: + id: 1 + jsonrpc: '2.0' + method: tools/list + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: message + data: {"jsonrpc":"2.0","id":1,"result":{"tools":[{"name":"read_wiki_structure","description":"Get a list of documentation topics for a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"read_wiki_contents","description":"View documentation about a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. 
\"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"ask_question","description":"Ask any question about a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"},"question":{"type":"string","description":"The question to ask about the repository"}},"required":["repoName","question"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - cdedaeddac989e6130483fada3a2be512ac7ebf53552449423f906d8b5f282e8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=cNMxdnon7Ewc9GmFCyuC2Z%2BKv2vnutahwvOaZ%2FSj6q3JcknZyWJJ4jI%2FbdZlmdBmU7PFktnC0jXJzCzxts%2FOV7FvYKVLd8SD9w3j%2FzbVvFzPTDXy8YRHV4VZvNYFGHMJY4ZiEA%3D%3D"}]}' + transfer-encoding: + - chunked + vary: + - accept-encoding + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - cdedaeddac989e6130483fada3a2be512ac7ebf53552449423f906d8b5f282e8 + method: DELETE + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Method not allowed"},"id":null}' + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '82' + content-type: + - text/plain;charset=UTF-8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=XEB1cbp%2BoMIAhdOnn4HBZHZfR2yoqR3Z%2FDcn7GGpmgTI7GDEUudQsea1sW9%2Bacdy0yAKPYy3I9I4tQttg5BYUOWTohaxBoq3RRWNIgJyG18UU0sigXD0gHE%2FMip3KISnFezmnQ%3D%3D"}]}' + vary: + - accept-encoding + status: + code: 405 + message: Method Not Allowed +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '1285' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: Can you tell me about the pydantic/pydantic-ai repo? Keep it short. + role: user + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: Get a list of documentation topics for a GitHub repository + name: read_wiki_structure + parameters: + additionalProperties: false + properties: + repoName: + description: 'GitHub repository: owner/repo (e.g. 
"facebook/react")' + type: string + required: + - repoName + type: object + strict: true + type: function + - function: + description: View documentation about a GitHub repository + name: read_wiki_contents + parameters: + additionalProperties: false + properties: + repoName: + description: 'GitHub repository: owner/repo (e.g. "facebook/react")' + type: string + required: + - repoName + type: object + strict: true + type: function + - function: + description: Ask any question about a GitHub repository + name: ask_question + parameters: + additionalProperties: false + properties: + question: + description: The question to ask about the repository + type: string + repoName: + description: 'GitHub repository: owner/repo (e.g. "facebook/react")' + type: string + required: + - repoName + - question + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '1160' + content-type: + - application/json + openai-organization: + - user-grnwlxd1653lxdzp921aoihz + openai-processing-ms: + - '571' + openai-project: + - proj_FYsIItHHgnSPdHBVMzhNBWGa + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: tool_calls + index: 0 + logprobs: null + message: + annotations: [] + content: null + refusal: null + role: assistant + tool_calls: + - function: + arguments: '{"repoName":"pydantic/pydantic-ai","question":"What is the pydantic-ai repository about?"}' + name: ask_question + id: call_6PsGSGgsIN4tDkVQjd9ozPOj + type: function + created: 1765405132 + id: chatcmpl-ClMoG5xhjRrpBUS45Xx4D2PaJcFnK + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_37d212baff + usage: + completion_tokens: 34 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 180 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 214 + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '152' + content-type: + - application/json + host: + - mcp.deepwiki.com + method: POST + parsed_body: + id: 0 + jsonrpc: '2.0' + method: initialize + params: + capabilities: {} + clientInfo: + name: mcp + version: 0.1.0 + protocolVersion: '2025-06-18' + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: message + data: {"jsonrpc":"2.0","id":0,"result":{"protocolVersion":"2025-03-26","capabilities":{"tools":{"listChanged":true}},"serverInfo":{"name":"DeepWiki","version":"0.0.1"}}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - 
'{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=XoQV0BNRcUUBxB0ZSWRrCYR21AKCRUpQHFTBuSDXXnp9oRJ%2FYJGL9KXqukpsG1ZsSGOH3R51tXKESx1KEh02V1H7lY7DelWa6KNp%2B6aehqRBECWrOL9exqIA4qq1gO6xQdMlKQ%3D%3D"}]}' + transfer-encoding: + - chunked + vary: + - accept-encoding + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '54' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + method: POST + parsed_body: + jsonrpc: '2.0' + method: notifications/initialized + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '' + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '0' + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=fXjkNmxYMvnBv5zvSilSAtuglGxLMpcArzlBXuooT%2BRazO35bd1hQ4CDu18flhDIHpQnZjr0qFS%2Br%2FHh5EDtSuQv0pRYNwTinLQGFSRQe%2FM2tS5NngcBYQfZoXMk9ofYWs2s%2Fg%3D%3D"}]}' + vary: + - accept-encoding + status: + code: 202 + message: Accepted +- request: + body: '' + headers: + accept: + - application/json, text/event-stream, text/event-stream + accept-encoding: + - gzip, deflate + cache-control: + - no-store + connection: + - keep-alive + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + method: GET + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Method not allowed"},"id":null}' + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '82' + content-type: + - text/plain;charset=UTF-8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=lHtihfdTRriHMcT0m0378%2FArc6OOFU7%2BaUITTAW5fWFf%2B104KApJE1JZsOYCZVarn6TBYhdoiB2JwbAjxaDWvFGxXzqnq3HsRNbl6AIsFWnhpv7aK8vQn8O6LjymRGcEhZg%3D"}]}' + vary: + - accept-encoding + status: + code: 405 + message: Method Not Allowed +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '46' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + method: POST + parsed_body: + id: 1 + jsonrpc: '2.0' + method: tools/list + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: message + data: {"jsonrpc":"2.0","id":1,"result":{"tools":[{"name":"read_wiki_structure","description":"Get a list of documentation topics for a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: 
owner/repo (e.g. \"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"read_wiki_contents","description":"View documentation about a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"ask_question","description":"Ask any question about a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"},"question":{"type":"string","description":"The question to ask about the repository"}},"required":["repoName","question"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=7OjssFx%2B4VVdCLoMctTsgn36bqLZhooT8Z8BCc2FEVUzQhlV%2FJNvzuKVBfyXHtKw%2BH2U3NO22m6Kftc7vHihJXoQ7RwQlookXRPeAE0GgeqlvXf3T46zG9VyJ%2F%2BqHgj%2FBLU%3D"}]}' + transfer-encoding: + - chunked + vary: + - accept-encoding + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '210' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + method: POST + parsed_body: + id: 2 + jsonrpc: '2.0' + method: tools/call + params: + _meta: + progressToken: 2 + arguments: + question: What is the pydantic-ai repository about? + repoName: pydantic/pydantic-ai + name: ask_question + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: ping + data: ping + + event: message + data: {"jsonrpc":"2.0","id":2,"result":{"content":[{"type":"text","text":"The `pydantic-ai` repository is a Python agent framework designed for building production-grade Generative AI applications using Large Language Models (LLMs) . 
It aims to provide an ergonomic and type-safe developer experience, similar to Pydantic and FastAPI, for AI agent development .\n\n## Core Purpose and Features\n\nThe framework focuses on simplifying the development of robust and reliable AI applications by offering a structured, type-safe, and extensible environment .\n\nKey features include:\n* **Type-safe Agents**: Agents are defined using `Agent[Deps, Output]` for compile-time validation, leveraging Pydantic for output validation and dependency injection .\n* **Model-agnostic Design**: It supports over 15 LLM providers through a unified `Model` interface, allowing for easy switching between different models and providers .\n* **Structured Outputs**: Automatic Pydantic validation and self-correction ensure structured and reliable outputs from LLMs .\n* **Comprehensive Observability**: Integration with OpenTelemetry and native Logfire provides real-time debugging, performance monitoring, and cost tracking .\n* **Production-ready Tooling**: This includes an evaluation framework (`pydantic-evals`), durable execution capabilities, and various protocol integrations like MCP, A2A, and AG-UI .\n* **Graph Support**: It provides a powerful way to define graphs using type hints for complex applications .\n\n## Framework Architecture\n\nThe framework is structured as a monorepo with multiple coordinated packages .\n\n### Core Packages \n\n* `pydantic-ai`: A full-featured bundle that acts as a convenience wrapper with all common extras pre-installed .\n* `pydantic-ai-slim`: The minimal core package containing the core framework with optional dependencies for specific providers .\n\n### Supporting Packages \n\n* `pydantic-graph`: A graph and state machine library that provides the agent execution graphs .\n* `pydantic-evals`: An evaluation framework for systematic testing and performance evaluation .\n\n## Agent Execution Flow\n\nPydantic AI uses `pydantic-graph` to implement agent execution as a finite state machine with three core nodes . The execution typically flows through `UserPromptNode` → `ModelRequestNode` → `CallToolsNode` .\n\n* `UserPromptNode`: Processes user input and creates the initial `ModelRequest` .\n* `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` and handles retries .\n* `CallToolsNode`: Executes tool functions via `RunContext[Deps]` .\n\nThe `Agent` class serves as the primary orchestrator and provides methods like `run()`, `run_sync()`, and `run_stream()` for different execution scenarios .\n\n## Example Usage\n\nA minimal example demonstrates how to define and run an agent :\n```python\nfrom pydantic_ai import Agent\n\nagent = Agent(\n 'anthropic:claude-sonnet-4-0',\n instructions='Be concise, reply with one sentence.',\n)\n\nresult = agent.run_sync('Where does \"hello world\" come from?')\nprint(result.output)\n```\n \n\nThis example configures an agent with a specific model and instructions, then runs it synchronously with a user prompt .\n\n## Notes\n\nThe `pydantic-ai` repository is actively maintained and considered \"Production/Stable\" . It supports Python versions 3.10 through 3.13 . 
The documentation is built using MkDocs and includes API references and examples .\n\nWiki pages you might want to explore:\n- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1)\n\nView this search on DeepWiki: https://deepwiki.com/search/what-is-the-pydanticai-reposit_b07a3d28-6abf-4d61-856c-6c3e07e2fb0c\n"}]}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=BgpgnpbY7klKcgEuPp%2FiQbQ8dfp9cSGvdyho90uf78ILEyyBK0k8f0S%2FhwKQtwJhPtP0x32tGFXxXxPXOBOYnJI7xyAmgHJN4obbLHO40wbYEHH%2B9FZmJexZl1%2F9hbMh2nyoIQ%3D%3D"}]}' + transfer-encoding: + - chunked + vary: + - accept-encoding + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - 7a71ff290d9b9fb0cb7e2483f972eeca6c1178127bbf1afc8c8f612d4cd6702c + method: DELETE + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Method not allowed"},"id":null}' + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '82' + content-type: + - text/plain;charset=UTF-8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=HuWRjE8dHD6L4PuaA%2B3PRcO0aeBz8QwQjxvLotxgLtnGPDkMWc9n8N9Gsg0IcDyH95tvdjxLAzlViHw6ecbb%2BkmDFzrO%2Bxt%2FwB5X%2Fp%2BIFPXmip%2BDgxhVF%2B%2FxrP7fKolilV7O%2Bg%3D%3D"}]}' + vary: + - accept-encoding + status: + code: 405 + message: Method Not Allowed +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '152' + content-type: + - application/json + host: + - mcp.deepwiki.com + method: POST + parsed_body: + id: 0 + jsonrpc: '2.0' + method: initialize + params: + capabilities: {} + clientInfo: + name: mcp + version: 0.1.0 + protocolVersion: '2025-06-18' + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: message + data: {"jsonrpc":"2.0","id":0,"result":{"protocolVersion":"2025-03-26","capabilities":{"tools":{"listChanged":true}},"serverInfo":{"name":"DeepWiki","version":"0.0.1"}}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - 
b50f0ce12e7805c97982cdd37986b3527767b51aa0a703bb373f31834095e6c0 + nel: + - '{"success_fraction":0,"report_to":"cf-nel","max_age":604800}' + report-to: + - '{"endpoints":[{"url":"https:\/\/a.nel.cloudflare.com\/report\/v4?s=sv8L%2BV8Em2fmN8b1RsNPYXajgbeHXPyBdTHHmOVSnWjVj%2B1H3ifdMqHTJRl7h%2FGK0QMapKh9K4IS3JIlPQLjo2NLhGPFa0R3hn6y0purkc4XmYs5VLmC8n4ciyI4KDAmKb%2FzvhSnHcsOEZHb80vyFdk8tdpflOA%3D"}],"group":"cf-nel","max_age":604800}' + transfer-encoding: + - chunked + vary: + - Accept-Encoding + status: + code: 200 + message: OK +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '54' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - b50f0ce12e7805c97982cdd37986b3527767b51aa0a703bb373f31834095e6c0 + method: POST + parsed_body: + jsonrpc: '2.0' + method: notifications/initialized + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '' + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '0' + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=cSRpCOzcOM93tN0EMWi%2FkFcfSHjRU8dJQB7Is95IKxzN25GR%2FhrJU05oNRU%2BavvTn%2BYTKCfcFtusioWfsysnDsJS8lXp3Qo8fU9O5eokivC554MbQ1PRhoL45gF5dECrvEI%3D"}]}' + vary: + - accept-encoding + status: + code: 202 + message: Accepted +- request: + body: '' + headers: + accept: + - application/json, text/event-stream, text/event-stream + accept-encoding: + - gzip, deflate + cache-control: + - no-store + connection: + - keep-alive + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - b50f0ce12e7805c97982cdd37986b3527767b51aa0a703bb373f31834095e6c0 + method: GET + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Method not allowed"},"id":null}' + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '82' + content-type: + - text/plain;charset=UTF-8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=sCtJY9Ja%2FEG3JIRQmsl3QxquVQn38MohBBYDC4LhZLZPK%2BwMo8Uf5HyInSC5gRlic1mrjzftFnFp5c5AW2htVsj0NuepL9%2FJPvHIBCopix9aFq0SxjGr52E6WN9uHBbZnEqLzQ%3D%3D"}]}' + vary: + - accept-encoding + status: + code: 405 + message: Method Not Allowed +- request: + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '46' + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - b50f0ce12e7805c97982cdd37986b3527767b51aa0a703bb373f31834095e6c0 + method: POST + parsed_body: + id: 1 + jsonrpc: '2.0' + method: tools/list + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: |+ + event: message + data: 
{"jsonrpc":"2.0","id":1,"result":{"tools":[{"name":"read_wiki_structure","description":"Get a list of documentation topics for a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"read_wiki_contents","description":"View documentation about a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"}},"required":["repoName"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}},{"name":"ask_question","description":"Ask any question about a GitHub repository","inputSchema":{"type":"object","properties":{"repoName":{"type":"string","description":"GitHub repository: owner/repo (e.g. \"facebook/react\")"},"question":{"type":"string","description":"The question to ask about the repository"}},"required":["repoName","question"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]}} + + event: ping + data: ping + + headers: + access-control-allow-headers: + - Content-Type, mcp-session-id, mcp-protocol-version + access-control-allow-methods: + - GET, POST, OPTIONS + access-control-allow-origin: + - '*' + access-control-expose-headers: + - mcp-session-id + access-control-max-age: + - '86400' + alt-svc: + - h3=":443"; ma=86400 + cache-control: + - no-cache + connection: + - keep-alive + content-type: + - text/event-stream + mcp-session-id: + - b50f0ce12e7805c97982cdd37986b3527767b51aa0a703bb373f31834095e6c0 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=SXumXC6V%2BxXXW3V%2F3B%2FsraA58yC%2FyptvjYnhiDAhcaWxbeuI4H6iOaAK1OY%2F0KxisjMN1CAldx%2F7jWKDD4II0ajIP7U6XMVToGOSGGcZLlvgFFK7%2F7mEif4HWZojttdE7Cc%3D"}]}' + transfer-encoding: + - chunked + vary: + - accept-encoding + status: + code: 200 + message: OK +- request: + body: '' + headers: + accept: + - application/json, text/event-stream + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-type: + - application/json + host: + - mcp.deepwiki.com + mcp-protocol-version: + - '2025-03-26' + mcp-session-id: + - b50f0ce12e7805c97982cdd37986b3527767b51aa0a703bb373f31834095e6c0 + method: DELETE + uri: https://mcp.deepwiki.com/mcp + response: + body: + string: '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Method not allowed"},"id":null}' + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '82' + content-type: + - text/plain;charset=UTF-8 + nel: + - '{"report_to":"cf-nel","success_fraction":0.0,"max_age":604800}' + report-to: + - '{"group":"cf-nel","max_age":604800,"endpoints":[{"url":"https://a.nel.cloudflare.com/report/v4?s=xr1XKAXxETLz%2BaJjE4Tpg26C7LjR4%2FMO84CVDRUH59VRphgSR%2Bv7CHGXaVCnLGNmQ3h%2BGXdAxIVJoNShngYK7alFPc09ldnLP6taQ%2FtmVmgVCFvM%2BLGFh7LLr%2BrC0pPLXRo%3D"}]}' + vary: + - accept-encoding + status: + code: 405 + message: Method Not Allowed +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '5292' + content-type: + - application/json + cookie: + - 
__cf_bm=gFcGCCCkMTmVY3dWsQapynsHE4AbWPTskJYoyLQZSZE-1765405133-1.0.1.1-yAqe.j3bQWSajlbEOf6.r..mui2.7WJPPK9mwjVtmqRwxbPdHVMMWQIfoIeSQeRqC8zbRV865Zl_dKmld1P9cTw76JA.EsoTfjHPX_6tANI; + _cfuvid=B_WqbWMRZC6yieiFZ5B7lkgE2jBpODOfCGY8Ry81YPo-1765405133031-0.0.1.1-604800000 + host: + - api.openai.com + method: POST + parsed_body: + messages: + - content: Can you tell me about the pydantic/pydantic-ai repo? Keep it short. + role: user + - content: null + role: assistant + tool_calls: + - function: + arguments: '{"repoName":"pydantic/pydantic-ai","question":"What is the pydantic-ai repository about?"}' + name: ask_question + id: call_6PsGSGgsIN4tDkVQjd9ozPOj + type: function + - content: "The `pydantic-ai` repository is a Python agent framework designed for building production-grade Generative + AI applications using Large Language Models (LLMs) . It aims to provide an ergonomic and type-safe developer experience, + similar to Pydantic and FastAPI, for AI agent development .\n\n## Core Purpose and Features\n\nThe framework focuses + on simplifying the development of robust and reliable AI applications by offering a structured, type-safe, and extensible + environment .\n\nKey features include:\n* **Type-safe Agents**: Agents are defined using `Agent[Deps, Output]` + for compile-time validation, leveraging Pydantic for output validation and dependency injection .\n* **Model-agnostic + Design**: It supports over 15 LLM providers through a unified `Model` interface, allowing for easy switching between + different models and providers .\n* **Structured Outputs**: Automatic Pydantic validation and self-correction + ensure structured and reliable outputs from LLMs .\n* **Comprehensive Observability**: Integration with OpenTelemetry + and native Logfire provides real-time debugging, performance monitoring, and cost tracking .\n* **Production-ready + Tooling**: This includes an evaluation framework (`pydantic-evals`), durable execution capabilities, and various + protocol integrations like MCP, A2A, and AG-UI .\n* **Graph Support**: It provides a powerful way to define + graphs using type hints for complex applications .\n\n## Framework Architecture\n\nThe framework is structured + as a monorepo with multiple coordinated packages .\n\n### Core Packages \n\n* `pydantic-ai`: A full-featured bundle + that acts as a convenience wrapper with all common extras pre-installed .\n* `pydantic-ai-slim`: The minimal core + package containing the core framework with optional dependencies for specific providers .\n\n### Supporting Packages + \n\n* `pydantic-graph`: A graph and state machine library that provides the agent execution graphs .\n* `pydantic-evals`: + An evaluation framework for systematic testing and performance evaluation .\n\n## Agent Execution Flow\n\nPydantic + AI uses `pydantic-graph` to implement agent execution as a finite state machine with three core nodes . 
The execution + typically flows through `UserPromptNode` → `ModelRequestNode` → `CallToolsNode` .\n\n* `UserPromptNode`: Processes + user input and creates the initial `ModelRequest` .\n* `ModelRequestNode`: Calls `model.request()` or `model.request_stream()` + and handles retries .\n* `CallToolsNode`: Executes tool functions via `RunContext[Deps]` .\n\nThe `Agent` class + serves as the primary orchestrator and provides methods like `run()`, `run_sync()`, and `run_stream()` for different + execution scenarios .\n\n## Example Usage\n\nA minimal example demonstrates how to define and run an agent :\n```python\nfrom + pydantic_ai import Agent\n\nagent = Agent(\n 'anthropic:claude-sonnet-4-0',\n instructions='Be concise, reply + with one sentence.',\n)\n\nresult = agent.run_sync('Where does \"hello world\" come from?')\nprint(result.output)\n```\n + \n\nThis example configures an agent with a specific model and instructions, then runs it synchronously with a user + prompt .\n\n## Notes\n\nThe `pydantic-ai` repository is actively maintained and considered \"Production/Stable\" + . It supports Python versions 3.10 through 3.13 . The documentation is built using MkDocs and includes API references + and examples .\n\nWiki pages you might want to explore:\n- [Overview (pydantic/pydantic-ai)](/wiki/pydantic/pydantic-ai#1)\n\nView + this search on DeepWiki: https://deepwiki.com/search/what-is-the-pydanticai-reposit_b07a3d28-6abf-4d61-856c-6c3e07e2fb0c\n" + role: tool + tool_call_id: call_6PsGSGgsIN4tDkVQjd9ozPOj + model: gpt-4o + stream: false + tool_choice: auto + tools: + - function: + description: Get a list of documentation topics for a GitHub repository + name: read_wiki_structure + parameters: + additionalProperties: false + properties: + repoName: + description: 'GitHub repository: owner/repo (e.g. "facebook/react")' + type: string + required: + - repoName + type: object + strict: true + type: function + - function: + description: View documentation about a GitHub repository + name: read_wiki_contents + parameters: + additionalProperties: false + properties: + repoName: + description: 'GitHub repository: owner/repo (e.g. "facebook/react")' + type: string + required: + - repoName + type: object + strict: true + type: function + - function: + description: Ask any question about a GitHub repository + name: ask_question + parameters: + additionalProperties: false + properties: + question: + description: The question to ask about the repository + type: string + repoName: + description: 'GitHub repository: owner/repo (e.g. "facebook/react")' + type: string + required: + - repoName + - question + type: object + strict: true + type: function + uri: https://api.openai.com/v1/chat/completions + response: + headers: + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-length: + - '1963' + content-type: + - application/json + openai-organization: + - user-grnwlxd1653lxdzp921aoihz + openai-processing-ms: + - '5352' + openai-project: + - proj_FYsIItHHgnSPdHBVMzhNBWGa + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + annotations: [] + content: |- + The `pydantic-ai` repository is a Python framework designed for building production-grade Generative AI applications using Large Language Models (LLMs). 
It emphasizes a structured, type-safe, and extensible development environment, mirroring the user-friendly experience of Pydantic and FastAPI for AI agent development. + + ### Key Features + - **Type-safe Agents**: Uses `Agent[Deps, Output]` for compile-time validation. + - **Model-agnostic Design**: Supports over 15 LLM providers with a unified interface. + - **Structured Outputs**: Ensures reliable LLM outputs with Pydantic validation. + - **Observability and Tooling**: Includes real-time monitoring, debugging tools, and an evaluation framework. + - **Graph Support**: Allows defining complex execution graphs with type hints. + + The framework operates as a monorepo with core and supporting packages like `pydantic-ai-slim`, `pydantic-graph`, and `pydantic-evals`. Agent execution uses a finite state machine approach, providing robust orchestration and execution mechanisms. + + The repository is actively maintained, supports Python 3.10 to 3.13, and offers comprehensive documentation and examples. + refusal: null + role: assistant + created: 1765405200 + id: chatcmpl-ClMpMlSxMgZD1k7rwYSxuGfkOf700 + model: gpt-4o-2024-08-06 + object: chat.completion + service_tier: default + system_fingerprint: fp_83554c687e + usage: + completion_tokens: 242 + completion_tokens_details: + accepted_prediction_tokens: 0 + audio_tokens: 0 + reasoning_tokens: 0 + rejected_prediction_tokens: 0 + prompt_tokens: 1019 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 0 + total_tokens: 1261 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 94f3f444ca..fc085e5878 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -64,7 +64,6 @@ from temporalio.workflow import ActivityConfig from pydantic_ai.durable_exec.temporal import AgentPlugin, LogfirePlugin, PydanticAIPlugin, TemporalAgent - from pydantic_ai.durable_exec.temporal._dynamic_toolset import TemporalDynamicToolset from pydantic_ai.durable_exec.temporal._function_toolset import TemporalFunctionToolset from pydantic_ai.durable_exec.temporal._mcp_server import TemporalMCPServer from pydantic_ai.durable_exec.temporal._model import TemporalModel @@ -1108,29 +1107,17 @@ async def test_toolset_without_id(): # --- DynamicToolset / @agent.toolset tests --- -def dynamic_toolset_model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: - """Model logic for testing dynamic toolsets.""" - # If no tool calls yet, call the tool - if len(messages) == 1: - return ModelResponse(parts=[ToolCallPart(tool_name='get_dynamic_data', args={'key': 'test'})]) - # After tool call, return final result - return ModelResponse(parts=[TextPart(content='Dynamic result received')]) +def get_dynamic_weather(location: str) -> str: + """Get the weather for a location.""" + return f'The weather in {location} is sunny.' 
-dynamic_toolset_function_model = FunctionModel(dynamic_toolset_model_logic) - -dynamic_toolset_agent = Agent(dynamic_toolset_function_model, name='dynamic_toolset_agent') +dynamic_toolset_agent = Agent(TestModel(), name='dynamic_toolset_agent') @dynamic_toolset_agent.toolset def my_dynamic_toolset(ctx: RunContext[None]) -> FunctionToolset[None]: - toolset = FunctionToolset[None](id='inner_dynamic') - - @toolset.tool - def get_dynamic_data(key: str) -> str: - return f'dynamic_value_for_{key}' - - return toolset + return FunctionToolset(tools=[get_dynamic_weather], id='dynamic_weather') dynamic_toolset_temporal_agent = TemporalAgent( @@ -1147,33 +1134,6 @@ async def run(self, prompt: str) -> str: return result.output -async def test_dynamic_toolset_temporal_agent_structure(): - """Test that DynamicToolset is correctly wrapped by TemporalDynamicToolset.""" - toolsets = dynamic_toolset_temporal_agent.toolsets - - # Should have the dynamic toolset wrapped - assert any(isinstance(t, TemporalDynamicToolset) for t in toolsets) - - # Find the TemporalDynamicToolset - for toolset in toolsets: - if isinstance(toolset, TemporalDynamicToolset): - # Check it has the correct id (defaults to function name) - assert toolset.id == 'my_dynamic_toolset' - # Check it has both get_tools and call_tool activities - activities = toolset.temporal_activities - assert len(activities) == 2 - activity_names: list[str] = [ - name - for a in activities - if (name := ActivityDefinition.must_from_callable(a).name) is not None # pyright: ignore[reportUnknownMemberType] - ] - assert any('get_tools' in name for name in activity_names) - assert any('call_tool' in name for name in activity_names) - break - else: - pytest.fail('TemporalDynamicToolset not found in toolsets') - - async def test_dynamic_toolset_in_workflow(client: Client): """Test that @agent.toolset works correctly in a Temporal workflow.""" async with Worker( @@ -1184,163 +1144,72 @@ async def test_dynamic_toolset_in_workflow(client: Client): ): output = await client.execute_workflow( DynamicToolsetAgentWorkflow.run, - args=['Call the dynamic tool with test key'], + args=['Get the weather for London'], id='test_dynamic_toolset_workflow', task_queue=TASK_QUEUE, ) - assert output == 'Dynamic result received' - - -# Test with explicit id parameter -dynamic_toolset_agent_with_id = Agent(dynamic_toolset_function_model, name='dynamic_toolset_agent_with_id') - - -@dynamic_toolset_agent_with_id.toolset(id='custom_toolset_id') -def my_custom_id_toolset(ctx: RunContext[None]) -> FunctionToolset[None]: - toolset = FunctionToolset[None](id='inner') - - @toolset.tool - def get_custom_data(key: str) -> str: - return f'custom_value_for_{key}' - - return toolset + assert output == snapshot('{"get_dynamic_weather":"The weather in a is sunny."}') -dynamic_toolset_with_id_temporal_agent = TemporalAgent( - dynamic_toolset_agent_with_id, - activity_config=BASE_ACTIVITY_CONFIG, -) +async def test_dynamic_toolset_outside_workflow(): + """Test that the dynamic toolset agent works correctly outside of a workflow.""" + result = await dynamic_toolset_temporal_agent.run('Get the weather for Paris') + assert result.output == snapshot('{"get_dynamic_weather":"The weather in a is sunny."}') -async def test_dynamic_toolset_with_custom_id(): - """Test that explicit id parameter is respected.""" - toolsets = dynamic_toolset_with_id_temporal_agent.toolsets +# --- MCP-based DynamicToolset test --- +# Tests that @agent.toolset with an MCP toolset works with Temporal workflows. 
+# Uses FastMCPToolset (HTTP-based) rather than MCPServerStdio (subprocess-based) because +# MCPServerStdio has issues when created dynamically inside Temporal activities. - for toolset in toolsets: - if isinstance(toolset, TemporalDynamicToolset): - assert toolset.id == 'custom_toolset_id' - break - else: - pytest.fail('TemporalDynamicToolset not found in toolsets') +fastmcp_dynamic_toolset_agent = Agent(model, name='fastmcp_dynamic_toolset_agent') -async def test_dynamic_toolset_get_tools_outside_workflow(): - """Test that get_tools works when called outside a workflow (delegates to wrapped toolset).""" - ctx = RunContext( - deps=None, - model=TestModel(), - usage=RunUsage(), - run_id='test-run', - ) - - for toolset in dynamic_toolset_temporal_agent.toolsets: - if isinstance(toolset, TemporalDynamicToolset): - async with toolset: - tools = await toolset.get_tools(ctx) - assert 'get_dynamic_data' in tools - break - else: - pytest.fail('TemporalDynamicToolset not found') +@fastmcp_dynamic_toolset_agent.toolset(per_run_step=False) +def my_fastmcp_dynamic_toolset(ctx: RunContext[None]) -> FastMCPToolset: + """Dynamic toolset that returns an MCP toolset. -async def test_dynamic_toolset_call_tool_outside_workflow(): - """Test that call_tool works when called outside a workflow (delegates to wrapped toolset).""" - ctx = RunContext( - deps=None, - model=TestModel(), - usage=RunUsage(), - run_id='test-run', - ) - - for toolset in dynamic_toolset_temporal_agent.toolsets: - if isinstance(toolset, TemporalDynamicToolset): - async with toolset: - tools = await toolset.get_tools(ctx) - tool = tools['get_dynamic_data'] - result = await toolset.call_tool('get_dynamic_data', {'key': 'mykey'}, ctx, tool) - assert result == 'dynamic_value_for_mykey' - break - else: - pytest.fail('TemporalDynamicToolset not found') - - -async def test_dynamic_toolset_tool_not_found_in_activity(): - """Test that calling a non-existent tool in the activity raises UserError.""" - from pydantic_ai.durable_exec.temporal._toolset import CallToolParams - - ctx = RunContext( - deps=None, - model=TestModel(), - usage=RunUsage(), - run_id='test-run', - ) - - for toolset in dynamic_toolset_temporal_agent.toolsets: - if isinstance(toolset, TemporalDynamicToolset): - # Serialize the run context as the activity would receive it - serialized_ctx = TemporalRunContext.serialize_run_context(ctx) - - # Create params with a non-existent tool name - params = CallToolParams( - name='nonexistent_tool', - tool_args={'key': 'test'}, - serialized_run_context=serialized_ctx, - tool_def=None, - ) - - # Call the activity directly - this should raise UserError - with pytest.raises(UserError, match="Tool 'nonexistent_tool' not found in dynamic toolset"): - await toolset.call_tool_activity(params, None) - break - else: - pytest.fail('TemporalDynamicToolset not found') - - -# Create a dynamic toolset with activity disabled for testing -dynamic_toolset_agent_disabled = Agent(dynamic_toolset_function_model, name='dynamic_toolset_agent_disabled') - - -@dynamic_toolset_agent_disabled.toolset -def my_dynamic_toolset_disabled(ctx: RunContext[None]) -> FunctionToolset[None]: - toolset = FunctionToolset[None](id='inner_dynamic_disabled') - - @toolset.tool - def get_dynamic_data(key: str) -> str: - return f'disabled_dynamic_value_for_{key}' - - return toolset + This tests MCP lifecycle management (context manager enter/exit) within DynamicToolset + Temporal. + Uses per_run_step=False so the toolset persists across run steps within an activity. 
+ """ + return FastMCPToolset('https://mcp.deepwiki.com/mcp', id='dynamic_deepwiki') -dynamic_toolset_activity_disabled_temporal_agent = TemporalAgent( - dynamic_toolset_agent_disabled, +fastmcp_dynamic_toolset_temporal_agent = TemporalAgent( + fastmcp_dynamic_toolset_agent, activity_config=BASE_ACTIVITY_CONFIG, - tool_activity_config={'my_dynamic_toolset_disabled': {'get_dynamic_data': False}}, ) @workflow.defn -class DynamicToolsetActivityDisabledWorkflow: +class FastMCPDynamicToolsetAgentWorkflow: @workflow.run async def run(self, prompt: str) -> str: - result = await dynamic_toolset_activity_disabled_temporal_agent.run(prompt) + result = await fastmcp_dynamic_toolset_temporal_agent.run(prompt) return result.output -async def test_dynamic_toolset_activity_disabled(client: Client): - """Test that call_tool with activity disabled runs tool directly in workflow.""" +async def test_fastmcp_dynamic_toolset_in_workflow(allow_model_requests: None, client: Client): + """Test that @agent.toolset with FastMCPToolset works in a Temporal workflow. + + This demonstrates MCP lifecycle management (entering/exiting the MCP toolset context manager) + within a DynamicToolset wrapped by TemporalDynamicToolset. + """ async with Worker( client, task_queue=TASK_QUEUE, - workflows=[DynamicToolsetActivityDisabledWorkflow], - plugins=[AgentPlugin(dynamic_toolset_activity_disabled_temporal_agent)], + workflows=[FastMCPDynamicToolsetAgentWorkflow], + plugins=[AgentPlugin(fastmcp_dynamic_toolset_temporal_agent)], ): output = await client.execute_workflow( - DynamicToolsetActivityDisabledWorkflow.run, - args=['Call the dynamic tool with test key'], - id='test_dynamic_toolset_activity_disabled_wf', + FastMCPDynamicToolsetAgentWorkflow.run, + args=['Can you tell me about the pydantic/pydantic-ai repo? 
Keep it short.'], + id='test_fastmcp_dynamic_toolset_workflow', task_queue=TASK_QUEUE, ) - assert output == 'Dynamic result received' + # The deepwiki MCP server should return info about the pydantic-ai repo + assert 'pydantic' in output.lower() or 'agent' in output.lower() async def test_temporal_agent(): diff --git a/tests/test_toolsets.py b/tests/test_toolsets.py index 9a4a344d12..7f06d7630e 100644 --- a/tests/test_toolsets.py +++ b/tests/test_toolsets.py @@ -721,18 +721,28 @@ async def test_visit_and_replace(): ] ) visited_toolset = toolset.visit_and_replace(lambda toolset: WrapperToolset(toolset)) - assert visited_toolset == CombinedToolset( - [ - WrapperToolset(WrapperToolset(toolset1)), - DynamicToolset( - toolset_func=active_dynamic_toolset.toolset_func, - per_run_step=active_dynamic_toolset.per_run_step, - _toolset=WrapperToolset(toolset2), - _run_step=active_dynamic_toolset._run_step, # pyright: ignore[reportPrivateUsage] - ), - WrapperToolset(inactive_dynamic_toolset), - ] - ) + + # Check the structure of the visited toolset + assert isinstance(visited_toolset, CombinedToolset) + assert len(visited_toolset.toolsets) == 3 + + # First toolset is doubly wrapped + assert isinstance(visited_toolset.toolsets[0], WrapperToolset) + assert isinstance(visited_toolset.toolsets[0].wrapped, WrapperToolset) + assert visited_toolset.toolsets[0].wrapped.wrapped is toolset1 + + # Second toolset is a DynamicToolset with wrapped inner toolset + visited_dynamic = visited_toolset.toolsets[1] + assert isinstance(visited_dynamic, DynamicToolset) + assert visited_dynamic.toolset_func is active_dynamic_toolset.toolset_func + assert visited_dynamic.per_run_step == active_dynamic_toolset.per_run_step + assert isinstance(visited_dynamic._toolset, WrapperToolset) # pyright: ignore[reportPrivateUsage] + assert visited_dynamic._toolset.wrapped is toolset2 # pyright: ignore[reportPrivateUsage] + assert visited_dynamic._run_step == active_dynamic_toolset._run_step # pyright: ignore[reportPrivateUsage] + + # Third toolset is the inactive dynamic toolset wrapped + assert isinstance(visited_toolset.toolsets[2], WrapperToolset) + assert visited_toolset.toolsets[2].wrapped is inactive_dynamic_toolset async def test_dynamic_toolset(): @@ -822,3 +832,53 @@ def no_toolset_func(ctx: RunContext[None]) -> None: assert tools == {} assert toolset._toolset is None # pyright: ignore[reportPrivateUsage] + + +def test_dynamic_toolset_id(): + """Test that DynamicToolset can have an id set.""" + + def toolset_func(ctx: RunContext[None]) -> FunctionToolset[None]: + return FunctionToolset() + + # No id by default + toolset_no_id = DynamicToolset[None](toolset_func=toolset_func) + assert toolset_no_id.id is None + + # Explicit id + toolset_with_id = DynamicToolset[None](toolset_func=toolset_func, id='my_dynamic_toolset') + assert toolset_with_id.id == 'my_dynamic_toolset' + + # copy() preserves id + copied = toolset_with_id.copy() + assert copied.id == 'my_dynamic_toolset' + + +def test_agent_toolset_decorator_id(): + """Test that @agent.toolset decorator auto-assigns id from function name or accepts explicit id.""" + from pydantic_ai import Agent + from pydantic_ai.models.test import TestModel + + agent = Agent(TestModel()) + + @agent.toolset + def my_tools(ctx: RunContext[None]) -> FunctionToolset[None]: + return FunctionToolset() + + @agent.toolset(id='custom_id') + def other_tools(ctx: RunContext[None]) -> FunctionToolset[None]: + return FunctionToolset() + + # The toolsets are DynamicToolsets with auto-assigned or custom ids + 
toolsets = agent.toolsets + assert len(toolsets) == 3 # FunctionToolset for agent tools + 2 dynamic toolsets + + # First is the agent's own FunctionToolset + assert isinstance(toolsets[0], FunctionToolset) + + # Second toolset should have id from function name + assert isinstance(toolsets[1], DynamicToolset) + assert toolsets[1].id == 'my_tools' + + # Third toolset should have explicit id + assert isinstance(toolsets[2], DynamicToolset) + assert toolsets[2].id == 'custom_id' From e0e183da711915d069a4ea8d504dc7f2d50f27b2 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 10 Dec 2025 17:40:09 -0500 Subject: [PATCH 5/7] fix example --- docs/durable_execution/temporal.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/durable_execution/temporal.md b/docs/durable_execution/temporal.md index 340dfbbfca..325cbfbaec 100644 --- a/docs/durable_execution/temporal.md +++ b/docs/durable_execution/temporal.md @@ -92,7 +92,7 @@ from pydantic_ai.durable_exec.temporal import ( ) agent = Agent( - 'gpt-5', + 'openai:gpt-5', instructions="You're an expert in geography.", name='geography', # (10)! ) @@ -161,6 +161,17 @@ When `TemporalAgent` dynamically creates activities for the wrapped agent's mode For dynamic toolsets created with the [`@agent.toolset`][pydantic_ai.Agent.toolset] decorator, the `id` parameter can be set explicitly or it will default to the function name: ```python {test="skip"} +from dataclasses import dataclass + +from pydantic_ai import Agent, RunContext +from pydantic_ai.toolsets import FunctionToolset + +agent = Agent('openai:gpt-5') + +@dataclass +class MyDeps: + ... + @agent.toolset(id='my_dynamic_tools') def my_toolset(ctx: RunContext[MyDeps]) -> FunctionToolset: ... 
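Note on the fixed example above: it only sketches the decorator itself. For context, a self-contained sketch of how a dynamic toolset with an explicit `id` might be wired up for Temporal is shown below; the agent name, deps type, model string, tool body, and the bare `TemporalAgent(agent)` call are illustrative assumptions for this sketch, not part of the patch.

```python
from dataclasses import dataclass

from pydantic_ai import Agent, RunContext
from pydantic_ai.durable_exec.temporal import TemporalAgent
from pydantic_ai.toolsets import FunctionToolset


@dataclass
class MyDeps:
    user_name: str


# `name` is required for Temporal so activity names stay stable across deployments.
agent = Agent('openai:gpt-5', name='geography', deps_type=MyDeps)


# `id` identifies this dynamic toolset's activities within the workflow.
@agent.toolset(id='my_dynamic_tools')
def my_toolset(ctx: RunContext[MyDeps]) -> FunctionToolset[MyDeps]:
    toolset = FunctionToolset[MyDeps](id='inner_tools')

    @toolset.tool
    def greet(city: str) -> str:
        """Greet the current user from a city."""
        return f'Hello {ctx.deps.user_name} from {city}!'

    return toolset


# Wrapping the agent is what derives the activity names from the agent's
# `name` and the toolset `id` (activity_config etc. omitted for brevity).
temporal_agent = TemporalAgent(agent)
```

As the surrounding docs text explains, both the agent `name` and the toolset `id` should be treated as stable identifiers and not changed once workflows are in production.
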
From a3580c8e5756d81d45cf9b2ce42c241a79fec35e Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Wed, 10 Dec 2025 22:36:28 -0500 Subject: [PATCH 6/7] coverage --- .../pydantic_ai/durable_exec/temporal/_dynamic_toolset.py | 2 +- tests/test_toolsets.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py index 0a4fca0fc2..2e3e86768c 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_dynamic_toolset.py @@ -137,7 +137,7 @@ async def call_tool( return await super().call_tool(name, tool_args, ctx, tool) tool_activity_config = self.tool_activity_config.get(name) - if tool_activity_config is False: + if tool_activity_config is False: # pragma: no cover return await super().call_tool(name, tool_args, ctx, tool) merged_config = self.activity_config | (tool_activity_config or {}) diff --git a/tests/test_toolsets.py b/tests/test_toolsets.py index 7f06d7630e..302bd3813c 100644 --- a/tests/test_toolsets.py +++ b/tests/test_toolsets.py @@ -838,7 +838,7 @@ def test_dynamic_toolset_id(): """Test that DynamicToolset can have an id set.""" def toolset_func(ctx: RunContext[None]) -> FunctionToolset[None]: - return FunctionToolset() + return FunctionToolset() # pragma: no cover # No id by default toolset_no_id = DynamicToolset[None](toolset_func=toolset_func) @@ -862,11 +862,11 @@ def test_agent_toolset_decorator_id(): @agent.toolset def my_tools(ctx: RunContext[None]) -> FunctionToolset[None]: - return FunctionToolset() + return FunctionToolset() # pragma: no cover @agent.toolset(id='custom_id') def other_tools(ctx: RunContext[None]) -> FunctionToolset[None]: - return FunctionToolset() + return FunctionToolset() # pragma: no cover # The toolsets are DynamicToolsets with auto-assigned or custom ids toolsets = agent.toolsets From c2d05ce49eede751ac10ecd73fa62b60e704181f Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Sat, 13 Dec 2025 12:44:41 -0500 Subject: [PATCH 7/7] apply review fixes --- docs/durable_execution/temporal.md | 21 +----- docs/toolsets.md | 2 - .../pydantic_ai/agent/__init__.py | 4 +- .../pydantic_ai/toolsets/_dynamic.py | 74 ++++++++++++------- tests/test_temporal.py | 39 ++++++---- tests/test_toolsets.py | 30 +++++--- 6 files changed, 93 insertions(+), 77 deletions(-) diff --git a/docs/durable_execution/temporal.md b/docs/durable_execution/temporal.md index 325cbfbaec..3ce48c83e8 100644 --- a/docs/durable_execution/temporal.md +++ b/docs/durable_execution/temporal.md @@ -158,26 +158,7 @@ To ensure that Temporal knows what code to run when an activity fails or is inte When `TemporalAgent` dynamically creates activities for the wrapped agent's model requests and toolsets (specifically those that implement their own tool listing and calling, i.e. [`FunctionToolset`][pydantic_ai.toolsets.FunctionToolset] and [`MCPServer`][pydantic_ai.mcp.MCPServer]), their names are derived from the agent's [`name`][pydantic_ai.agent.AbstractAgent.name] and the toolsets' [`id`s][pydantic_ai.toolsets.AbstractToolset.id]. These fields are normally optional, but are required to be set when using Temporal. They should not be changed once the durable agent has been deployed to production as this would break active workflows. 
-For dynamic toolsets created with the [`@agent.toolset`][pydantic_ai.Agent.toolset] decorator, the `id` parameter can be set explicitly or it will default to the function name: - -```python {test="skip"} -from dataclasses import dataclass - -from pydantic_ai import Agent, RunContext -from pydantic_ai.toolsets import FunctionToolset - -agent = Agent('openai:gpt-5') - -@dataclass -class MyDeps: - ... - -@agent.toolset(id='my_dynamic_tools') -def my_toolset(ctx: RunContext[MyDeps]) -> FunctionToolset: - ... -``` - -Note that with Temporal, `per_run_step=False` is not respected, as the toolset always needs to be created on-the-fly in the activity. +For dynamic toolsets created with the [`@agent.toolset`][pydantic_ai.Agent.toolset] decorator, the `id` parameter must be set explicitly. Note that with Temporal, `per_run_step=False` is not respected, as the toolset always needs to be created on-the-fly in the activity. Other than that, any agent and toolset will just work! diff --git a/docs/toolsets.md b/docs/toolsets.md index 6d87ce7840..1b041b3baa 100644 --- a/docs/toolsets.md +++ b/docs/toolsets.md @@ -598,8 +598,6 @@ To register a dynamic toolset, you can pass a function that takes [`RunContext`] By default, the function will be called again ahead of each agent run step. If you are using the decorator, you can optionally provide a `per_run_step=False` argument to indicate that the toolset only needs to be built once for the entire run. -When using [Temporal durable execution](./durable_execution/temporal.md), the decorator also accepts an `id` parameter to uniquely identify the toolset. If not provided, the function name is used as the ID. - ```python {title="dynamic_toolset.py", requires="function_toolset.py"} from dataclasses import dataclass from typing import Literal diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 28b976dc8b..c0affbd74a 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1275,12 +1275,10 @@ async def simple_toolset(ctx: RunContext[str]) -> AbstractToolset[str]: per_run_step: Whether to re-evaluate the toolset for each run step. Defaults to True. id: An optional unique ID for the dynamic toolset. Required for use with durable execution environments like Temporal, where the ID identifies the toolset's activities within the workflow. - If not provided, defaults to the function name. 
""" def toolset_decorator(func_: ToolsetFunc[AgentDepsT]) -> ToolsetFunc[AgentDepsT]: - toolset_id = id if id is not None else func_.__name__ - self._dynamic_toolsets.append(DynamicToolset(func_, per_run_step=per_run_step, id=toolset_id)) + self._dynamic_toolsets.append(DynamicToolset(func_, per_run_step=per_run_step, id=id)) return func_ return toolset_decorator if func is None else toolset_decorator(func) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py index 2679915d15..393d506e59 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/_dynamic.py @@ -2,9 +2,9 @@ import inspect from collections.abc import Awaitable, Callable -from typing import Any, TypeAlias +from typing import Any, Generic, TypeAlias -from typing_extensions import Self +from typing_extensions import Self, TypedDict from .._run_context import AgentDepsT, RunContext from .abstract import AbstractToolset, ToolsetTool @@ -16,18 +16,24 @@ """A sync/async function which takes a run context and returns a toolset.""" +class ToolsetRunStep(TypedDict, Generic[AgentDepsT]): + """State for a DynamicToolset for a specific run.""" + + toolset: AbstractToolset[AgentDepsT] | None + run_step: int | None + + class DynamicToolset(AbstractToolset[AgentDepsT]): """A toolset that dynamically builds a toolset using a function that takes the run context. - It should only be used during a single agent run as it stores the generated toolset. - To use it multiple times, use the `copy()` method. + State is isolated per run using `ctx.run_id` as a key, allowing the same instance + to be safely reused across multiple agent runs. """ toolset_func: ToolsetFunc[AgentDepsT] per_run_step: bool _id: str | None - _toolset: AbstractToolset[AgentDepsT] | None - _run_step: int | None + _toolset_runstep: dict[str, ToolsetRunStep[AgentDepsT]] def __init__( self, @@ -48,8 +54,7 @@ def __init__( self.toolset_func = toolset_func self.per_run_step = per_run_step self._id = id - self._toolset = None - self._run_step = None + self._toolset_runstep = {} @property def id(self) -> str | None: @@ -68,16 +73,25 @@ async def __aenter__(self) -> Self: async def __aexit__(self, *args: Any) -> bool | None: try: - if self._toolset is not None: - return await self._toolset.__aexit__(*args) + result = None + for run_state in self._toolset_runstep.values(): + if run_state['toolset'] is not None: + result = await run_state['toolset'].__aexit__(*args) finally: - self._toolset = None - self._run_step = None + self._toolset_runstep.clear() + return result async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: - if self._toolset is None or (self.per_run_step and ctx.run_step != self._run_step): - if self._toolset is not None: - await self._toolset.__aexit__() + run_id = ctx.run_id or '__default__' + + if run_id not in self._toolset_runstep: + self._toolset_runstep[run_id] = {'toolset': None, 'run_step': None} + + run_state = self._toolset_runstep[run_id] + + if run_state['toolset'] is None or (self.per_run_step and ctx.run_step != run_state['run_step']): + if run_state['toolset'] is not None: + await run_state['toolset'].__aexit__() toolset = self.toolset_func(ctx) if inspect.isawaitable(toolset): @@ -86,33 +100,41 @@ async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[ if toolset is not None: await toolset.__aenter__() - self._toolset = toolset - self._run_step = ctx.run_step + run_state['toolset'] = 
toolset + run_state['run_step'] = ctx.run_step - if self._toolset is None: + if run_state['toolset'] is None: return {} - return await self._toolset.get_tools(ctx) + return await run_state['toolset'].get_tools(ctx) async def call_tool( self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT] ) -> Any: - assert self._toolset is not None - return await self._toolset.call_tool(name, tool_args, ctx, tool) + run_id = ctx.run_id or '__default__' + run_state = self._toolset_runstep.get(run_id) + assert run_state is not None and run_state['toolset'] is not None + return await run_state['toolset'].call_tool(name, tool_args, ctx, tool) def apply(self, visitor: Callable[[AbstractToolset[AgentDepsT]], None]) -> None: - if self._toolset is None: + if not self._toolset_runstep: super().apply(visitor) else: - self._toolset.apply(visitor) + for run_state in self._toolset_runstep.values(): + if run_state['toolset'] is not None: + run_state['toolset'].apply(visitor) def visit_and_replace( self, visitor: Callable[[AbstractToolset[AgentDepsT]], AbstractToolset[AgentDepsT]] ) -> AbstractToolset[AgentDepsT]: - if self._toolset is None: + if not self._toolset_runstep: return super().visit_and_replace(visitor) else: new_copy = self.copy() - new_copy._toolset = self._toolset.visit_and_replace(visitor) - new_copy._run_step = self._run_step + for run_id, run_state in self._toolset_runstep.items(): + if run_state['toolset'] is not None: + new_copy._toolset_runstep[run_id] = { + 'toolset': run_state['toolset'].visit_and_replace(visitor), + 'run_step': run_state['run_step'], + } return new_copy diff --git a/tests/test_temporal.py b/tests/test_temporal.py index fc085e5878..e194a1e7df 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -1107,17 +1107,25 @@ async def test_toolset_without_id(): # --- DynamicToolset / @agent.toolset tests --- -def get_dynamic_weather(location: str) -> str: - """Get the weather for a location.""" - return f'The weather in {location} is sunny.' +@dataclass +class DynamicToolsetDeps: + user_name: str + + +dynamic_toolset_agent = Agent(TestModel(), name='dynamic_toolset_agent', deps_type=DynamicToolsetDeps) -dynamic_toolset_agent = Agent(TestModel(), name='dynamic_toolset_agent') +@dynamic_toolset_agent.toolset(id='my_dynamic_tools') +def my_dynamic_toolset(ctx: RunContext[DynamicToolsetDeps]) -> FunctionToolset[DynamicToolsetDeps]: + toolset = FunctionToolset[DynamicToolsetDeps](id='dynamic_weather') + @toolset.tool + def get_dynamic_weather(location: str) -> str: + """Get the weather for a location.""" + user = ctx.deps.user_name + return f'Weather in {location} for {user}: sunny.' 
-@dynamic_toolset_agent.toolset -def my_dynamic_toolset(ctx: RunContext[None]) -> FunctionToolset[None]: - return FunctionToolset(tools=[get_dynamic_weather], id='dynamic_weather') + return toolset dynamic_toolset_temporal_agent = TemporalAgent( @@ -1129,8 +1137,8 @@ def my_dynamic_toolset(ctx: RunContext[None]) -> FunctionToolset[None]: @workflow.defn class DynamicToolsetAgentWorkflow: @workflow.run - async def run(self, prompt: str) -> str: - result = await dynamic_toolset_temporal_agent.run(prompt) + async def run(self, prompt: str, deps: DynamicToolsetDeps) -> str: + result = await dynamic_toolset_temporal_agent.run(prompt, deps=deps) return result.output @@ -1144,21 +1152,24 @@ async def test_dynamic_toolset_in_workflow(client: Client): ): output = await client.execute_workflow( DynamicToolsetAgentWorkflow.run, - args=['Get the weather for London'], + args=['Get the weather for London', DynamicToolsetDeps(user_name='Alice')], id='test_dynamic_toolset_workflow', task_queue=TASK_QUEUE, ) - assert output == snapshot('{"get_dynamic_weather":"The weather in a is sunny."}') + assert output == snapshot('{"get_dynamic_weather":"Weather in a for Alice: sunny."}') async def test_dynamic_toolset_outside_workflow(): """Test that the dynamic toolset agent works correctly outside of a workflow.""" - result = await dynamic_toolset_temporal_agent.run('Get the weather for Paris') - assert result.output == snapshot('{"get_dynamic_weather":"The weather in a is sunny."}') + result = await dynamic_toolset_temporal_agent.run( + 'Get the weather for Paris', deps=DynamicToolsetDeps(user_name='Bob') + ) + assert result.output == snapshot('{"get_dynamic_weather":"Weather in a for Bob: sunny."}') # --- MCP-based DynamicToolset test --- # Tests that @agent.toolset with an MCP toolset works with Temporal workflows. +# See https://github.com/pydantic/pydantic-ai/issues/3390 # Uses FastMCPToolset (HTTP-based) rather than MCPServerStdio (subprocess-based) because # MCPServerStdio has issues when created dynamically inside Temporal activities. @@ -1166,7 +1177,7 @@ async def test_dynamic_toolset_outside_workflow(): fastmcp_dynamic_toolset_agent = Agent(model, name='fastmcp_dynamic_toolset_agent') -@fastmcp_dynamic_toolset_agent.toolset(per_run_step=False) +@fastmcp_dynamic_toolset_agent.toolset(id='fastmcp_toolset', per_run_step=False) def my_fastmcp_dynamic_toolset(ctx: RunContext[None]) -> FastMCPToolset: """Dynamic toolset that returns an MCP toolset. 
diff --git a/tests/test_toolsets.py b/tests/test_toolsets.py index 302bd3813c..c4edab0974 100644 --- a/tests/test_toolsets.py +++ b/tests/test_toolsets.py @@ -709,7 +709,7 @@ async def test_visit_and_replace(): active_dynamic_toolset = DynamicToolset(toolset_func=lambda ctx: toolset2) await active_dynamic_toolset.get_tools(build_run_context(None)) - assert active_dynamic_toolset._toolset is toolset2 # pyright: ignore[reportPrivateUsage] + assert active_dynamic_toolset._toolset_runstep['__default__']['toolset'] is toolset2 # pyright: ignore[reportPrivateUsage] inactive_dynamic_toolset = DynamicToolset(toolset_func=lambda ctx: FunctionToolset()) @@ -736,9 +736,12 @@ async def test_visit_and_replace(): assert isinstance(visited_dynamic, DynamicToolset) assert visited_dynamic.toolset_func is active_dynamic_toolset.toolset_func assert visited_dynamic.per_run_step == active_dynamic_toolset.per_run_step - assert isinstance(visited_dynamic._toolset, WrapperToolset) # pyright: ignore[reportPrivateUsage] - assert visited_dynamic._toolset.wrapped is toolset2 # pyright: ignore[reportPrivateUsage] - assert visited_dynamic._run_step == active_dynamic_toolset._run_step # pyright: ignore[reportPrivateUsage] + assert isinstance(visited_dynamic._toolset_runstep['__default__']['toolset'], WrapperToolset) # pyright: ignore[reportPrivateUsage] + assert visited_dynamic._toolset_runstep['__default__']['toolset'].wrapped is toolset2 # pyright: ignore[reportPrivateUsage] + assert ( + visited_dynamic._toolset_runstep['__default__']['run_step'] # pyright: ignore[reportPrivateUsage] + == active_dynamic_toolset._toolset_runstep['__default__']['run_step'] # pyright: ignore[reportPrivateUsage] + ) # Third toolset is the inactive dynamic toolset wrapped assert isinstance(visited_toolset.toolsets[2], WrapperToolset) @@ -781,14 +784,15 @@ def toolset_factory(ctx: RunContext[None]) -> AbstractToolset[None]: def get_inner_toolset(toolset: DynamicToolset[None] | None) -> EnterableToolset | None: assert toolset is not None - inner_toolset = toolset._toolset # pyright: ignore[reportPrivateUsage] + run_state = toolset._toolset_runstep.get('__default__') # pyright: ignore[reportPrivateUsage] + inner_toolset = run_state['toolset'] if run_state else None assert isinstance(inner_toolset, EnterableToolset) or inner_toolset is None return inner_toolset run_context = build_run_context(None) async with toolset: - assert not toolset._toolset # pyright: ignore[reportPrivateUsage] + assert not toolset._toolset_runstep # pyright: ignore[reportPrivateUsage] # Test that calling get_tools initializes the toolset tools = await toolset.get_tools(run_context) @@ -825,13 +829,15 @@ def no_toolset_func(ctx: RunContext[None]) -> None: assert tools == {} async with toolset: - assert toolset._toolset is None # pyright: ignore[reportPrivateUsage] + run_state = toolset._toolset_runstep.get('__default__') # pyright: ignore[reportPrivateUsage] + assert run_state is None or run_state['toolset'] is None tools = await toolset.get_tools(run_context) assert tools == {} - assert toolset._toolset is None # pyright: ignore[reportPrivateUsage] + run_state = toolset._toolset_runstep.get('__default__') # pyright: ignore[reportPrivateUsage] + assert run_state is not None and run_state['toolset'] is None def test_dynamic_toolset_id(): @@ -854,7 +860,7 @@ def toolset_func(ctx: RunContext[None]) -> FunctionToolset[None]: def test_agent_toolset_decorator_id(): - """Test that @agent.toolset decorator auto-assigns id from function name or accepts explicit id.""" + """Test 
that @agent.toolset decorator requires explicit id or defaults to None.""" from pydantic_ai import Agent from pydantic_ai.models.test import TestModel @@ -868,16 +874,16 @@ def my_tools(ctx: RunContext[None]) -> FunctionToolset[None]: def other_tools(ctx: RunContext[None]) -> FunctionToolset[None]: return FunctionToolset() # pragma: no cover - # The toolsets are DynamicToolsets with auto-assigned or custom ids + # The toolsets are DynamicToolsets with None or explicit ids toolsets = agent.toolsets assert len(toolsets) == 3 # FunctionToolset for agent tools + 2 dynamic toolsets # First is the agent's own FunctionToolset assert isinstance(toolsets[0], FunctionToolset) - # Second toolset should have id from function name + # Second toolset without explicit id should have None assert isinstance(toolsets[1], DynamicToolset) - assert toolsets[1].id == 'my_tools' + assert toolsets[1].id is None # Third toolset should have explicit id assert isinstance(toolsets[2], DynamicToolset)
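
As a closing illustration of the per-run keying introduced in the `_dynamic.py` rewrite above, here is a minimal, hypothetical sketch of the same pattern in plain Python; `PerRunCache` and `build` are illustrative names and not part of the patch.

```python
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Any


@dataclass
class PerRunCache:
    """Toy version of per-run state keyed by run_id, as in the reworked DynamicToolset."""

    build: Callable[[str], Any]  # lazily builds the per-run resource (the toolset)
    _state: dict[str, Any] = field(default_factory=dict)

    def get(self, run_id: str | None) -> Any:
        key = run_id or '__default__'  # same fallback key as the patch uses
        if key not in self._state:
            self._state[key] = self.build(key)  # state is created once per run key
        return self._state[key]

    def clear(self) -> None:
        self._state.clear()  # mirrors resetting all per-run state on __aexit__


cache = PerRunCache(build=lambda run_id: f'toolset-for-{run_id}')
assert cache.get('run-a') == 'toolset-for-run-a'
assert cache.get('run-b') == 'toolset-for-run-b'  # a second run gets isolated state
assert cache.get(None) == 'toolset-for-__default__'
```

The `'__default__'` fallback mirrors the case where `ctx.run_id` is not set, and clearing the dict corresponds to `__aexit__` tearing down every per-run toolset, which is what lets a single `DynamicToolset` instance be reused safely across concurrent agent runs.
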