From 543d41402717cdb360c60ce7f93a3a923cd1b8ac Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Wed, 3 Dec 2025 13:10:06 +0530 Subject: [PATCH 01/98] initial commit --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 56 ++++++++--- .../pydantic_ai/agent/__init__.py | 17 +++- .../pydantic_ai/agent/abstract.py | 1 + pydantic_ai_slim/pydantic_ai/messages.py | 43 ++++++++- tests/test_agent.py | 95 +++++++++++++++++++ 5 files changed, 197 insertions(+), 15 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 6a14f8b350..d969c900c8 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -11,7 +11,7 @@ from contextvars import ContextVar from copy import deepcopy from dataclasses import field, replace -from typing import TYPE_CHECKING, Any, Generic, Literal, TypeGuard, cast +from typing import TYPE_CHECKING, Any, Final, Generic, Literal, TypeGuard, cast from opentelemetry.trace import Tracer from typing_extensions import TypeVar, assert_never @@ -137,6 +137,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): model: models.Model model_settings: ModelSettings | None + prompt_templates: _messages.PromptTemplates | None usage_limits: _usage.UsageLimits max_result_retries: int end_strategy: EndStrategy @@ -494,6 +495,10 @@ async def _prepare_request( # Update the new message index to ensure `result.new_messages()` returns the correct messages ctx.deps.new_message_index -= len(original_history) - len(message_history) + prompt_templates = ctx.deps.prompt_templates + if prompt_templates: + _apply_prompt_templates(message_history, prompt_templates, run_context) + # Merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts, # but don't store it in the message history on state. This is just for the benefit of model classes that want clear user/assistant boundaries. 
# See `tests/test_tools.py::test_parallel_tool_return_with_deferred` for an example where this is necessary @@ -848,6 +853,15 @@ async def process_tool_calls( # noqa: C901 kind = 'unknown' tool_calls_by_kind[kind].append(call) + prompt_templates = ctx.deps.prompt_templates + run_context: RunContext[DepsT] | None = None + if prompt_templates: + run_context = build_run_context(ctx) + + def apply_prompt_template(part: _messages.ToolReturnPart) -> None: + if prompt_templates and run_context is not None: + prompt_templates.apply_template(part, run_context) + # First, we handle output tool calls for call in tool_calls_by_kind['output']: if final_result: @@ -856,14 +870,18 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Final result processed.', tool_call_id=call.tool_call_id, + prompt_template_type='final_result', ) + apply_prompt_template(part) else: yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, content='Output tool not used - a final result was already processed.', tool_call_id=call.tool_call_id, + prompt_template_type='output_tool_not_used', ) + apply_prompt_template(part) yield _messages.FunctionToolResultEvent(part) output_parts.append(part) @@ -887,21 +905,24 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Final result processed.', tool_call_id=call.tool_call_id, + prompt_template_type='final_result', ) output_parts.append(part) + apply_prompt_template(part) final_result = result.FinalResult(result_data, call.tool_name, call.tool_call_id) # Then, we handle function tool calls calls_to_run: list[_messages.ToolCallPart] = [] if final_result and ctx.deps.end_strategy == 'early': for call in tool_calls_by_kind['function']: - output_parts.append( - _messages.ToolReturnPart( - tool_name=call.tool_name, - content='Tool not executed - a final result was already processed.', - tool_call_id=call.tool_call_id, - ) + part = _messages.ToolReturnPart( + tool_name=call.tool_name, + content='Tool not executed - a final result was already processed.', + tool_call_id=call.tool_call_id, + prompt_template_type='tool_not_executed', ) + apply_prompt_template(part) + output_parts.append(part) else: calls_to_run.extend(tool_calls_by_kind['function']) @@ -953,13 +974,14 @@ async def process_tool_calls( # noqa: C901 # we shouldn't insert return parts as the deferred tools will still get a real result. 
if not isinstance(final_result.output, _output.DeferredToolRequests): for call in calls: - output_parts.append( - _messages.ToolReturnPart( - tool_name=call.tool_name, - content='Tool not executed - a final result was already processed.', - tool_call_id=call.tool_call_id, - ) + part = _messages.ToolReturnPart( + tool_name=call.tool_name, + content='Tool not executed - a final result was already processed.', + tool_call_id=call.tool_call_id, + prompt_template_type='tool_not_executed', ) + apply_prompt_template(part) + output_parts.append(part) elif calls: deferred_calls['external'].extend(tool_calls_by_kind['external']) deferred_calls['unapproved'].extend(tool_calls_by_kind['unapproved']) @@ -1346,3 +1368,11 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess else: clean_messages.append(message) return clean_messages + + +def _apply_prompt_templates( + messages: list[_messages.ModelMessage], prompt_templates: _messages.PromptTemplates, ctx: RunContext[Any] +): + for msg in messages: + for msg_part in msg.parts: + prompt_templates.apply_template(msg_part, ctx) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index c8208ac9e6..c2942eacf0 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -124,6 +124,9 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]): be merged with this value, with the runtime argument taking priority. """ + prompt_templates: _messages.PromptTemplates | None + """Optional prompt templates used to customize the system-injected messages for this agent.""" + _output_type: OutputSpec[OutputDataT] instrument: InstrumentationSettings | bool | None @@ -166,6 +169,7 @@ def __init__( deps_type: type[AgentDepsT] = NoneType, name: str | None = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, retries: int = 1, validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, @@ -218,6 +222,7 @@ def __init__( deps_type: type[AgentDepsT] = NoneType, name: str | None = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, retries: int = 1, validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, @@ -251,6 +256,8 @@ def __init__( name: The name of the agent, used for logging. If `None`, we try to infer the agent name from the call frame when the agent is first run. model_settings: Optional model request settings to use for this agent's runs, by default. + prompt_templates: Optional prompt templates to customize how system-injected messages + (like retry prompts or tool return wrappers) are rendered for this agent. retries: The default number of retries to allow for tool calls and output validation, before raising an error. For model request retries, see the [HTTP Request Retries](../retries.md) documentation. validation_context: Pydantic [validation context](https://docs.pydantic.dev/latest/concepts/validators/#validation-context) used to validate tool arguments and outputs. 
@@ -294,6 +301,7 @@ def __init__(
         self._name = name
         self.end_strategy = end_strategy
         self.model_settings = model_settings
+        self.prompt_templates = prompt_templates
         self._output_type = output_type
 
         self.instrument = instrument
@@ -409,7 +417,7 @@ def event_stream_handler(self) -> EventStreamHandler[AgentDepsT] | None:
         return self._event_stream_handler
 
     def __repr__(self) -> str:
-        return f'{type(self).__name__}(model={self.model!r}, name={self.name!r}, end_strategy={self.end_strategy!r}, model_settings={self.model_settings!r}, output_type={self.output_type!r}, instrument={self.instrument!r})'
+        return f'{type(self).__name__}(model={self.model!r}, name={self.name!r}, end_strategy={self.end_strategy!r}, model_settings={self.model_settings!r}, prompt_templates={self.prompt_templates!r}, output_type={self.output_type!r}, instrument={self.instrument!r})'
 
     @overload
     def iter(
@@ -423,6 +431,7 @@ def iter(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
+        prompt_templates: _messages.PromptTemplates | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -442,6 +451,7 @@ def iter(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
+        prompt_templates: _messages.PromptTemplates | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -461,6 +471,7 @@ async def iter(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
+        prompt_templates: _messages.PromptTemplates | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -537,6 +548,8 @@ async def main():
             instructions: Optional additional instructions to use for this run.
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
+            prompt_templates: Optional prompt templates to override how system-generated parts are
+                phrased for this specific run, falling back to the agent's defaults if omitted.
            usage_limits: Optional limits on model request count or token usage.
            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
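
A minimal usage sketch of the constructor-level API these hunks introduce, assuming the `PromptTemplates` fields defined later in this patch (the model name is a placeholder):

    from pydantic_ai import Agent
    from pydantic_ai.messages import PromptTemplates

    # A static string replaces the default system-injected text wholesale;
    # the callable form (sketched further below) can build it from the run context.
    templates = PromptTemplates(retry_prompt='Validation failed. Correct your output and try again.')
    agent = Agent('test', prompt_templates=templates)

The agent-level value acts as the default; the `prompt_templates` keyword on `iter()` (and on the `run*` methods in a later commit) takes priority for a single run.
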
@@ -587,6 +600,7 @@ async def main(): merged_settings = merge_model_settings(model_used.settings, self.model_settings) model_settings = merge_model_settings(merged_settings, model_settings) usage_limits = usage_limits or _usage.UsageLimits() + prompt_templates = prompt_templates or self.prompt_templates instructions_literal, instructions_functions = self._get_instructions(additional_instructions=instructions) @@ -614,6 +628,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: new_message_index=len(message_history) if message_history else 0, model=model_used, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, max_result_retries=self._max_result_retries, end_strategy=self.end_strategy, diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 567b61dff6..6e760b7e2a 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -134,6 +134,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index ac0fb0da6d..92f90c9184 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -3,7 +3,7 @@ import base64 import hashlib from abc import ABC, abstractmethod -from collections.abc import Sequence +from collections.abc import Callable, Sequence from dataclasses import KW_ONLY, dataclass, field, replace from datetime import datetime from mimetypes import guess_type @@ -23,6 +23,7 @@ from .usage import RequestUsage if TYPE_CHECKING: + from ._run_context import RunContext as _RunContext from .models.instrumented import InstrumentationSettings @@ -932,8 +933,13 @@ class RetryPromptPart: part_kind: Literal['retry-prompt'] = 'retry-prompt' """Part type identifier, this is available on all parts as a discriminator.""" + pre_compiled: str | None = None + def model_response(self) -> str: """Return a string message describing why the retry is requested.""" + if self.pre_compiled: + return self.pre_compiled + if isinstance(self.content, str): if self.tool_name is None: description = f'Validation feedback:\n{self.content}' @@ -1896,3 +1902,38 @@ class BuiltinToolResultEvent: AgentStreamEvent = Annotated[ModelResponseStreamEvent | HandleResponseEvent, pydantic.Discriminator('event_kind')] """An event in the agent stream: model response stream events and response-handling events.""" + + +@dataclass +class PromptTemplates: + """Templates for specific message parts that Pydantic AI injects. 
+ + Each template can be either: + - A static string: Simple replacement for the default message + - A callable: Dynamic formatting based on RunContext + """ + + retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None + """Template for RetryPromptPart messages when injecting retry instructions.""" + + tool_final_result: str | Callable[[ToolReturnPart, Any[Any]], str] | None = None + + def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): + if isinstance(message, ToolReturnPart): + if not self.tool_final_result: + return + # Apply tool return template + if isinstance(self.tool_final_result, str): + message.content = self.tool_final_result + return + + message.content = self.tool_final_result(message, ctx) + elif isinstance(message, RetryPromptPart): + if not self.retry_prompt: + return '' + # Apply RetryPromptPart + if isinstance(self.retry_prompt, str): + message.pre_compiled = self.retry_prompt + return + message.pre_compiled = self.retry_prompt(message, ctx) + diff --git a/tests/test_agent.py b/tests/test_agent.py index c912334434..6afb5af931 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -58,6 +58,7 @@ ) from pydantic_ai.agent import AgentRunResult, WrapperAgent from pydantic_ai.builtin_tools import CodeExecutionTool, MCPServerTool, WebSearchTool +from pydantic_ai.messages import PromptTemplates from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel from pydantic_ai.models.test import TestModel from pydantic_ai.output import OutputObjectDefinition, StructuredDict, ToolOutput @@ -285,6 +286,100 @@ def check_b(cls, v: str) -> str: Fix the errors and try again.""") +def test_result_pydantic_model_validation_error_prompt_templates(): + def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + assert info.output_tools is not None + if len(messages) == 1: + args_json = '{"a": 1, "b": "foo"}' + else: + args_json = '{"a": 1, "b": "bar"}' + return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, args_json)]) + + class Bar(BaseModel): + a: int + b: str + + @field_validator('b') + def check_b(cls, v: str) -> str: + if v == 'foo': + raise ValueError('must not be foo') + return v + + prompt_templates = PromptTemplates(retry_prompt=lambda msg, _: f'IMPORTANT: {msg.content}') + + agent = Agent(FunctionModel(return_model), output_type=Bar, prompt_templates=prompt_templates) + + print('\nAgent prompt templates', agent.prompt_templates) + + result = agent.run_sync('Hello') + assert isinstance(result.output, Bar) + assert result.output.model_dump() == snapshot({'a': 1, 'b': 'bar'}) + messages_part_kinds = [(m.kind, [p.part_kind for p in m.parts]) for m in result.all_messages()] + assert messages_part_kinds == snapshot( + [ + ('request', ['user-prompt']), + ('response', ['tool-call']), + ('request', ['retry-prompt']), + ('response', ['tool-call']), + ('request', ['tool-return']), + ] + ) + + user_retry = result.all_messages()[2] + assert isinstance(user_retry, ModelRequest) + retry_prompt = user_retry.parts[0] + assert isinstance(retry_prompt, RetryPromptPart) + print('\n Retry Prompt ', retry_prompt) + assert retry_prompt.model_response() == snapshot( + "IMPORTANT: [{'type': 'value_error', 'loc': ('b',), 'msg': 'Value error, must not be foo', 'input': 'foo'}]" + ) + + +def test_tool_return_prompt_templates(): + class Output(BaseModel): + value: str + + def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: + assert 
info.output_tools is not None + final_tool = info.output_tools[0].name + return ModelResponse( + parts=[ + ToolCallPart(final_tool, {'value': 'first'}), + ToolCallPart(final_tool, {'value': 'second'}), + ToolCallPart('regular_tool', {'x': 1}), + ] + ) + + prompt_templates = PromptTemplates( + tool_final_result='FINISHED', + tool_output_not_used=lambda part, _: f'SKIPPED OUTPUT {part.tool_name}', + tool_not_executed=lambda part, _: f'SKIPPED TOOL {part.tool_name}', + ) + + agent = Agent( + FunctionModel(return_model), + output_type=Output, + end_strategy='early', + prompt_templates=prompt_templates, + ) + + @agent.tool_plain + def regular_tool(x: int) -> int: # pragma: no cover + return x + + result = agent.run_sync('prompt template test') + assert result.output.value == 'first' + + final_request = result.new_messages()[-1] + assert isinstance(final_request, ModelRequest) + tool_returns = [part for part in final_request.parts if isinstance(part, ToolReturnPart)] + assert [part.content for part in tool_returns] == [ + 'FINISHED', + 'SKIPPED OUTPUT final_result', + 'SKIPPED TOOL regular_tool', + ] + + def test_output_validator(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None From c5686de5ac2e850183e1b001b07fd5e859c0a789 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Wed, 3 Dec 2025 18:48:43 +0530 Subject: [PATCH 02/98] Removing from here for the moment --- pydantic_ai_slim/pydantic_ai/agent/abstract.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 6e760b7e2a..567b61dff6 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -134,7 +134,6 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, From 8d9d9b9b0e85d0658d7f981422f0faf416dc2475 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 09:59:49 +0530 Subject: [PATCH 03/98] Adding prompt_templates to public APIs in Agent.run family --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 2 +- .../pydantic_ai/agent/__init__.py | 12 ++++++ .../pydantic_ai/agent/abstract.py | 37 +++++++++++++++++++ pydantic_ai_slim/pydantic_ai/messages.py | 3 +- 4 files changed, 51 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index d969c900c8..c37a6b70d2 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -11,7 +11,7 @@ from contextvars import ContextVar from copy import deepcopy from dataclasses import field, replace -from typing import TYPE_CHECKING, Any, Final, Generic, Literal, TypeGuard, cast +from typing import TYPE_CHECKING, Any, Generic, Literal, TypeGuard, cast from opentelemetry.trace import Tracer from typing_extensions import TypeVar, assert_never diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index c2942eacf0..3134ff7468 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -364,6 +364,9 @@ def __init__( self._override_instructions: ContextVar[ _utils.Option[list[str | 
_system_prompt.SystemPromptFunc[AgentDepsT]]] ] = ContextVar('_override_instructions', default=None) + self._override_prompt_templates: ContextVar[ + _utils.Option[_messages.PromptTemplates] + ] = ContextVar('_override_prompt_templates', default=None) self._enter_lock = Lock() self._entered_count = 0 @@ -763,6 +766,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -776,6 +780,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + prompt_templates: The prompt templates to use instead of the prompt templates passed to the agent constructor and agent run. """ if _utils.is_set(name): name_token = self._override_name.set(_utils.Some(name)) @@ -808,6 +813,11 @@ def override( else: instructions_token = None + if _utils.is_set(prompt_templates): + prompt_templates_token = self._override_prompt_templates.set(_utils.Some(prompt_templates)) + else: + prompt_templates_token = None + try: yield finally: @@ -823,6 +833,8 @@ def override( self._override_tools.reset(tools_token) if instructions_token is not None: self._override_instructions.reset(instructions_token) + if prompt_templates_token is not None: + self._override_prompt_templates.reset(prompt_templates_token) @overload def instructions( diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 567b61dff6..07e32ef3cd 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -134,6 +134,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -154,6 +155,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -173,6 +175,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -207,6 +210,8 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. 
usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -231,6 +236,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, toolsets=toolsets, @@ -258,6 +264,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -278,6 +285,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -297,6 +305,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -330,6 +339,8 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -353,6 +364,7 @@ def run_sync( instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=False, @@ -374,6 +386,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -394,6 +407,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -414,6 +428,7 @@ async def run_stream( # noqa: C901 instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -455,6 +470,8 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. 
+ prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -484,6 +501,7 @@ async def main(): deps=deps, instructions=instructions, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=False, @@ -606,6 +624,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -625,6 +644,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -643,6 +663,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -686,6 +707,8 @@ def main(): model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
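
Per the docstrings above, the same templates can also be supplied for a single run; a sketch, assuming the `agent` and `templates` from the earlier example — the run-level argument takes priority over the agent default for that run only:

    result = agent.run_sync('Hello', prompt_templates=templates)

`run`, `run_stream`, `run_stream_sync`, `run_stream_events`, and `iter` accept the same keyword, per the overloads in this patch.
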
@@ -710,6 +733,7 @@ async def _consume_stream(): model=model, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -734,6 +758,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -753,6 +778,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -771,6 +797,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -821,6 +848,8 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
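
For the callable form these docstrings mention, a sketch of the expected signature, following the `Callable[[RetryPromptPart, RunContext[Any]], str]` type declared on `PromptTemplates.retry_prompt`:

    from typing import Any

    from pydantic_ai import RunContext
    from pydantic_ai.messages import PromptTemplates, RetryPromptPart

    def retry_text(part: RetryPromptPart, ctx: RunContext[Any]) -> str:
        # part.content carries the validation feedback; ctx exposes run metadata such as ctx.run_id.
        return f'Run {ctx.run_id}: the previous output failed validation with {part.content}. Fix the errors and try again.'

    templates = PromptTemplates(retry_prompt=retry_text)
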
@@ -846,6 +875,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, toolsets=toolsets, @@ -863,6 +893,7 @@ async def _run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -889,6 +920,7 @@ async def run_agent() -> AgentRunResult[Any]: instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=False, @@ -918,6 +950,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -937,6 +970,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -957,6 +991,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -1033,6 +1068,8 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
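
The `override()` support added earlier in this patch also makes the templates swappable in tests; a sketch, assuming the override is consumed by the run machinery like the other overridable settings:

    with agent.override(prompt_templates=PromptTemplates(retry_prompt='Fix the errors and try again.')):
        result = agent.run_sync('Hello')  # runs inside this block use the overridden templates
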
diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 92f90c9184..5e87b861e1 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1926,7 +1926,7 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru if isinstance(self.tool_final_result, str): message.content = self.tool_final_result return - + message.content = self.tool_final_result(message, ctx) elif isinstance(message, RetryPromptPart): if not self.retry_prompt: @@ -1936,4 +1936,3 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru message.pre_compiled = self.retry_prompt return message.pre_compiled = self.retry_prompt(message, ctx) - From 4968c1fdf3f4b644450636cefe1b4da887cb07ac Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 10:00:04 +0530 Subject: [PATCH 04/98] lint --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 3134ff7468..b1c1078bad 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -364,9 +364,9 @@ def __init__( self._override_instructions: ContextVar[ _utils.Option[list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]] ] = ContextVar('_override_instructions', default=None) - self._override_prompt_templates: ContextVar[ - _utils.Option[_messages.PromptTemplates] - ] = ContextVar('_override_prompt_templates', default=None) + self._override_prompt_templates: ContextVar[_utils.Option[_messages.PromptTemplates]] = ContextVar( + '_override_prompt_templates', default=None + ) self._enter_lock = Lock() self._entered_count = 0 From 5d041265307c31b1ffbda3f8f743c50990eaa81c Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 10:16:23 +0530 Subject: [PATCH 05/98] docstring --- pydantic_ai_slim/pydantic_ai/messages.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 5e87b861e1..ea592fa5c5 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1914,9 +1914,12 @@ class PromptTemplates: """ retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None - """Template for RetryPromptPart messages when injecting retry instructions.""" + """Override text for [`RetryPromptPart`][pydantic_ai.messages.RetryPromptPart] that Pydantic AI inserts before re-asking the model + after validation failures. Callables receive the retry part and run context to generate custom guidance.""" tool_final_result: str | Callable[[ToolReturnPart, Any[Any]], str] | None = None + """Override how tool return confirmations (final tool messages that wrap up a run) are phrased. 
Callables receive the + [`ToolReturnPart`][pydantic_ai.messages.ToolReturnPart] and run context to produce dynamic messaging.""" def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): if isinstance(message, ToolReturnPart): From b901be714001826324f175847fc3ad2f21a8a998 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 12:04:54 +0530 Subject: [PATCH 06/98] fix --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index c37a6b70d2..4f269b6e90 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -854,13 +854,6 @@ async def process_tool_calls( # noqa: C901 tool_calls_by_kind[kind].append(call) prompt_templates = ctx.deps.prompt_templates - run_context: RunContext[DepsT] | None = None - if prompt_templates: - run_context = build_run_context(ctx) - - def apply_prompt_template(part: _messages.ToolReturnPart) -> None: - if prompt_templates and run_context is not None: - prompt_templates.apply_template(part, run_context) # First, we handle output tool calls for call in tool_calls_by_kind['output']: @@ -870,18 +863,14 @@ def apply_prompt_template(part: _messages.ToolReturnPart) -> None: tool_name=call.tool_name, content='Final result processed.', tool_call_id=call.tool_call_id, - prompt_template_type='final_result', ) - apply_prompt_template(part) else: yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, content='Output tool not used - a final result was already processed.', tool_call_id=call.tool_call_id, - prompt_template_type='output_tool_not_used', ) - apply_prompt_template(part) yield _messages.FunctionToolResultEvent(part) output_parts.append(part) @@ -905,10 +894,8 @@ def apply_prompt_template(part: _messages.ToolReturnPart) -> None: tool_name=call.tool_name, content='Final result processed.', tool_call_id=call.tool_call_id, - prompt_template_type='final_result', ) output_parts.append(part) - apply_prompt_template(part) final_result = result.FinalResult(result_data, call.tool_name, call.tool_call_id) # Then, we handle function tool calls @@ -919,9 +906,7 @@ def apply_prompt_template(part: _messages.ToolReturnPart) -> None: tool_name=call.tool_name, content='Tool not executed - a final result was already processed.', tool_call_id=call.tool_call_id, - prompt_template_type='tool_not_executed', ) - apply_prompt_template(part) output_parts.append(part) else: calls_to_run.extend(tool_calls_by_kind['function']) @@ -978,9 +963,7 @@ def apply_prompt_template(part: _messages.ToolReturnPart) -> None: tool_name=call.tool_name, content='Tool not executed - a final result was already processed.', tool_call_id=call.tool_call_id, - prompt_template_type='tool_not_executed', ) - apply_prompt_template(part) output_parts.append(part) elif calls: deferred_calls['external'].extend(tool_calls_by_kind['external']) From ea4a9b8ab4294f38c772bc3fb2f4b3f508eb5e4e Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 12:23:53 +0530 Subject: [PATCH 07/98] remove test --- tests/test_agent.py | 45 --------------------------------------------- 1 file changed, 45 deletions(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index 6afb5af931..72d35d742a 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -335,51 +335,6 @@ def check_b(cls, v: str) -> str: ) -def 
test_tool_return_prompt_templates(): - class Output(BaseModel): - value: str - - def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: - assert info.output_tools is not None - final_tool = info.output_tools[0].name - return ModelResponse( - parts=[ - ToolCallPart(final_tool, {'value': 'first'}), - ToolCallPart(final_tool, {'value': 'second'}), - ToolCallPart('regular_tool', {'x': 1}), - ] - ) - - prompt_templates = PromptTemplates( - tool_final_result='FINISHED', - tool_output_not_used=lambda part, _: f'SKIPPED OUTPUT {part.tool_name}', - tool_not_executed=lambda part, _: f'SKIPPED TOOL {part.tool_name}', - ) - - agent = Agent( - FunctionModel(return_model), - output_type=Output, - end_strategy='early', - prompt_templates=prompt_templates, - ) - - @agent.tool_plain - def regular_tool(x: int) -> int: # pragma: no cover - return x - - result = agent.run_sync('prompt template test') - assert result.output.value == 'first' - - final_request = result.new_messages()[-1] - assert isinstance(final_request, ModelRequest) - tool_returns = [part for part in final_request.parts if isinstance(part, ToolReturnPart)] - assert [part.content for part in tool_returns] == [ - 'FINISHED', - 'SKIPPED OUTPUT final_result', - 'SKIPPED TOOL regular_tool', - ] - - def test_output_validator(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None From 933022f3cb0d7fd28c8f44de1042561720543a33 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 12:28:54 +0530 Subject: [PATCH 08/98] removing unused part --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 4f269b6e90..75e9b3928e 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -773,6 +773,11 @@ def _handle_final_result( ) -> End[result.FinalResult[NodeRunEndT]]: messages = ctx.state.message_history + if tool_responses and ctx.deps.prompt_templates: + run_ctx = build_run_context(ctx) + for part in tool_responses: + ctx.deps.prompt_templates.apply_template(part, run_ctx) + # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id)) @@ -853,8 +858,6 @@ async def process_tool_calls( # noqa: C901 kind = 'unknown' tool_calls_by_kind[kind].append(call) - prompt_templates = ctx.deps.prompt_templates - # First, we handle output tool calls for call in tool_calls_by_kind['output']: if final_result: From 6cc9b1dda8911ec89231882c58798cf32ce65254 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 22:05:43 +0530 Subject: [PATCH 09/98] fixing test --- pydantic_ai_slim/pydantic_ai/messages.py | 66 +++++++++++------- tests/test_agent.py | 87 ++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 24 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index ea592fa5c5..443fde341f 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1906,36 +1906,54 @@ class BuiltinToolResultEvent: @dataclass class PromptTemplates: - """Templates for specific message parts that Pydantic AI injects. + """Templates for customizing messages that Pydantic AI sends to models. 
- Each template can be either: - - A static string: Simple replacement for the default message - - A callable: Dynamic formatting based on RunContext + Each template can be a static string or a callable that receives context and returns a string. """ retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None - """Override text for [`RetryPromptPart`][pydantic_ai.messages.RetryPromptPart] that Pydantic AI inserts before re-asking the model - after validation failures. Callables receive the retry part and run context to generate custom guidance.""" + """Message sent to the model after validation failures or invalid responses. + + Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." + """ + + tool_final_result: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + """Confirmation message sent when a final result is successfully processed. + + Default: "Final result processed." + """ + + output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + """Message sent when an output tool call is skipped because a result was already found. + + Default: "Output tool not used - a final result was already processed." + """ + + function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + """Message sent when a function tool call is skipped because a result was already found. + + Default: "Tool not executed - a final result was already processed." + """ - tool_final_result: str | Callable[[ToolReturnPart, Any[Any]], str] | None = None - """Override how tool return confirmations (final tool messages that wrap up a run) are phrased. Callables receive the - [`ToolReturnPart`][pydantic_ai.messages.ToolReturnPart] and run context to produce dynamic messaging.""" def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): if isinstance(message, ToolReturnPart): - if not self.tool_final_result: - return - # Apply tool return template - if isinstance(self.tool_final_result, str): - message.content = self.tool_final_result - return - - message.content = self.tool_final_result(message, ctx) + if message.content == 'Final result processed.' and self.tool_final_result: + self._apply_tool_template(message, ctx, self.tool_final_result) + elif message.content == 'Output tool not used - a final result was already processed.' and self.output_tool_not_executed: + self._apply_tool_template(message, ctx, self.output_tool_not_executed) + elif message.content == 'Tool not executed - a final result was already processed.' 
and self.function_tool_not_executed: + self._apply_tool_template(message, ctx, self.function_tool_not_executed) + elif isinstance(message, RetryPromptPart): - if not self.retry_prompt: - return '' - # Apply RetryPromptPart - if isinstance(self.retry_prompt, str): - message.pre_compiled = self.retry_prompt - return - message.pre_compiled = self.retry_prompt(message, ctx) + if self.retry_prompt: + if isinstance(self.retry_prompt, str): + message.pre_compiled = self.retry_prompt + else: + message.pre_compiled = self.retry_prompt(message, ctx) + + def _apply_tool_template(self, message: ToolReturnPart, ctx: _RunContext[Any], template: str | Callable[[Any, _RunContext[Any]], str]): + if isinstance(template, str): + message.content = template + else: + message.content = template(message, ctx) \ No newline at end of file diff --git a/tests/test_agent.py b/tests/test_agent.py index 72d35d742a..f0011a054e 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -229,6 +229,93 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert result.all_messages_json().startswith(b'[{"parts":[{"content":"Hello",') +def test_prompt_templates(): + """Test both retry_prompt and tool_final_result templates.""" + + def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + assert info.output_tools is not None + if len(messages) == 1: + args_json = '{"a": "wrong", "b": "foo"}' + else: + args_json = '{"a": 42, "b": "foo"}' + return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, args_json)]) + + agent = Agent( + FunctionModel(return_model), + output_type=Foo, + prompt_templates=PromptTemplates( + retry_prompt=lambda part, ctx: f'Custom retry message {ctx.run_id} {part.content}', + tool_final_result=lambda part, ctx: f'Custom final result {ctx.run_id} {part.content}', + ), + ) + + result = agent.run_sync('Hello') + assert result.output.model_dump() == {'a': 42, 'b': 'foo'} + + retry_request = result.all_messages()[2] + assert isinstance(retry_request, ModelRequest) + retry_part = retry_request.parts[0] + assert isinstance(retry_part, RetryPromptPart) + # Verify the custom template includes run_id and content + response = retry_part.model_response() + assert response.startswith('Custom retry message ') + assert result.run_id in response + assert "[{'type': 'int_parsing'" in response + + assert result.all_messages() == snapshot( + [ + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), + ModelResponse( + parts=[ToolCallPart(tool_name='final_result', args='{"a": "wrong", "b": "foo"}', tool_call_id=IsStr())], + usage=RequestUsage(input_tokens=51, output_tokens=7), + model_name='function:return_model:', + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + RetryPromptPart( + tool_name='final_result', + content=[ + { + 'type': 'int_parsing', + 'loc': ('a',), + 'msg': 'Input should be a valid integer, unable to parse string as an integer', + 'input': 'wrong', + } + ], + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + pre_compiled="Custom retry message 630a3726-d848-4328-9824-035676e55100 [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())], + usage=RequestUsage(input_tokens=76, output_tokens=14), + 
model_name='function:return_model:', + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='final_result', + content='Custom final result', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + ) + ], + run_id=IsStr(), + ), + ] + ) + + def test_result_pydantic_model_validation_error(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None From c8ebcea03f8b9bbeba4a0b445528eb562191201f Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Thu, 4 Dec 2025 22:10:41 +0530 Subject: [PATCH 10/98] format --- pydantic_ai_slim/pydantic_ai/messages.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 443fde341f..f40f303b7f 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1935,16 +1935,21 @@ class PromptTemplates: Default: "Tool not executed - a final result was already processed." """ - def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): if isinstance(message, ToolReturnPart): if message.content == 'Final result processed.' and self.tool_final_result: self._apply_tool_template(message, ctx, self.tool_final_result) - elif message.content == 'Output tool not used - a final result was already processed.' and self.output_tool_not_executed: + elif ( + message.content == 'Output tool not used - a final result was already processed.' + and self.output_tool_not_executed + ): self._apply_tool_template(message, ctx, self.output_tool_not_executed) - elif message.content == 'Tool not executed - a final result was already processed.' and self.function_tool_not_executed: + elif ( + message.content == 'Tool not executed - a final result was already processed.' 
+ and self.function_tool_not_executed + ): self._apply_tool_template(message, ctx, self.function_tool_not_executed) - + elif isinstance(message, RetryPromptPart): if self.retry_prompt: if isinstance(self.retry_prompt, str): @@ -1952,8 +1957,10 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru else: message.pre_compiled = self.retry_prompt(message, ctx) - def _apply_tool_template(self, message: ToolReturnPart, ctx: _RunContext[Any], template: str | Callable[[Any, _RunContext[Any]], str]): + def _apply_tool_template( + self, message: ToolReturnPart, ctx: _RunContext[Any], template: str | Callable[[Any, _RunContext[Any]], str] + ): if isinstance(template, str): message.content = template else: - message.content = template(message, ctx) \ No newline at end of file + message.content = template(message, ctx) From 7eaa90bd5f4a9c21c3b0ba797eec2d39595e2e47 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 5 Dec 2025 21:12:44 +0530 Subject: [PATCH 11/98] fix --- pydantic_ai_slim/pydantic_ai/messages.py | 10 +++++----- tests/test_agent.py | 10 ++++++++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index f40f303b7f..e40c3f0b0e 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -933,12 +933,12 @@ class RetryPromptPart: part_kind: Literal['retry-prompt'] = 'retry-prompt' """Part type identifier, this is available on all parts as a discriminator.""" - pre_compiled: str | None = None + retry_template: str | None = None def model_response(self) -> str: """Return a string message describing why the retry is requested.""" - if self.pre_compiled: - return self.pre_compiled + if self.retry_template: + return self.retry_template if isinstance(self.content, str): if self.tool_name is None: @@ -1953,9 +1953,9 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru elif isinstance(message, RetryPromptPart): if self.retry_prompt: if isinstance(self.retry_prompt, str): - message.pre_compiled = self.retry_prompt + message.retry_template = self.retry_prompt else: - message.pre_compiled = self.retry_prompt(message, ctx) + message.retry_template = self.retry_prompt(message, ctx) def _apply_tool_template( self, message: ToolReturnPart, ctx: _RunContext[Any], template: str | Callable[[Any, _RunContext[Any]], str] diff --git a/tests/test_agent.py b/tests/test_agent.py index f0011a054e..b0fba7f1d6 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -289,7 +289,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ], tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - pre_compiled="Custom retry message 630a3726-d848-4328-9824-035676e55100 [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", + retry_template="Custom retry message 630a3726-d848-4328-9824-035676e55100 [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", ) ], run_id=IsStr(), @@ -5429,9 +5429,15 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon ) model = FunctionModel(model_function) - agent = Agent(model, system_prompt='You are a helpful assistant.') + prompt_templates = PromptTemplates(validation_retry_prompt='HENLU') + agent = Agent(model, system_prompt='You are a helpful assistant.', 
prompt_templates=prompt_templates) result = await agent.run('Hello') + retry_part = result.all_messages()[2].parts[0] + + response = retry_part.model_response() + + assert response == snapshot('HENLU') assert result.all_messages() == snapshot( [ From 16c4d92a66ae408bbebaae32b476b7551000cb79 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 09:15:33 +0530 Subject: [PATCH 12/98] Adding return kind to ToolReturnPart --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 7 ++ pydantic_ai_slim/pydantic_ai/messages.py | 30 +++-- tests/test_agent.py | 120 ++++++++++++++++--- 3 files changed, 128 insertions(+), 29 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 75e9b3928e..03fd4f18f7 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -866,6 +866,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Final result processed.', tool_call_id=call.tool_call_id, + return_kind='final-result-processed', ) else: yield _messages.FunctionToolCallEvent(call) @@ -873,6 +874,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Output tool not used - a final result was already processed.', tool_call_id=call.tool_call_id, + return_kind='output-tool-not-executed', ) yield _messages.FunctionToolResultEvent(part) @@ -897,6 +899,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Final result processed.', tool_call_id=call.tool_call_id, + return_kind='final-result-processed', ) output_parts.append(part) final_result = result.FinalResult(result_data, call.tool_name, call.tool_call_id) @@ -909,6 +912,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Tool not executed - a final result was already processed.', tool_call_id=call.tool_call_id, + return_kind='function-tool-not-executed', ) output_parts.append(part) else: @@ -966,6 +970,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content='Tool not executed - a final result was already processed.', tool_call_id=call.tool_call_id, + return_kind='function-tool-not-executed', ) output_parts.append(part) elif calls: @@ -1122,6 +1127,7 @@ async def _call_tool( tool_name=tool_call.tool_name, content=tool_call_result.message, tool_call_id=tool_call.tool_call_id, + return_kind='tool-denied', ), None elif isinstance(tool_call_result, exceptions.ModelRetry): m = _messages.RetryPromptPart( @@ -1184,6 +1190,7 @@ async def _call_tool( tool_call_id=tool_call.tool_call_id, content=tool_return.return_value, # type: ignore metadata=tool_return.metadata, + return_kind='tool-executed', ) return return_part, tool_return.content or None diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index e40c3f0b0e..9c787b73c5 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -871,6 +871,18 @@ class ToolReturnPart(BaseToolReturnPart): part_kind: Literal['tool-return'] = 'tool-return' """Part type identifier, this is available on all parts as a discriminator.""" + return_kind: ( + Literal[ + 'final-result-processed', + 'output-tool-not-executed', + 'function-tool-not-executed', + 'tool-executed', + 'tool-denied', + ] + | None + ) = None + """The kind of tool return such as final result processed, output tool not executed, etc. 
This is available on all parts as a discriminator.""" + @dataclass(repr=False) class BuiltinToolReturnPart(BaseToolReturnPart): @@ -934,10 +946,12 @@ class RetryPromptPart: """Part type identifier, this is available on all parts as a discriminator.""" retry_template: str | None = None + """Retry message compiled from the provided prompt template. When set, it is sent to the model in place of the default retry message.""" def model_response(self) -> str: """Return a string message describing why the retry is requested.""" if self.retry_template: + # A template-compiled message takes precedence over the default retry phrasing return self.retry_template if isinstance(self.content, str): @@ -1917,7 +1931,7 @@ class PromptTemplates: Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." """ - tool_final_result: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Confirmation message sent when a final result is successfully processed. Default: "Final result processed." @@ -1937,17 +1951,11 @@ class PromptTemplates: def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): if isinstance(message, ToolReturnPart): - if message.content == 'Final result processed.' and self.tool_final_result: - self._apply_tool_template(message, ctx, self.tool_final_result) - elif ( - message.content == 'Output tool not used - a final result was already processed.' and self.output_tool_not_executed - ): + if message.return_kind == 'final-result-processed' and self.final_result_processed: self._apply_tool_template(message, ctx, self.final_result_processed) + elif message.return_kind == 'output-tool-not-executed' and self.output_tool_not_executed: self._apply_tool_template(message, ctx, self.output_tool_not_executed) - elif ( - message.content == 'Tool not executed - a final result was already processed.'
- and self.function_tool_not_executed - ): + elif message.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed: self._apply_tool_template(message, ctx, self.function_tool_not_executed) elif isinstance(message, RetryPromptPart): diff --git a/tests/test_agent.py b/tests/test_agent.py index b0fba7f1d6..074f3e86c8 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -220,6 +220,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -230,7 +231,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse def test_prompt_templates(): - """Test both retry_prompt and tool_final_result templates.""" + """Test both retry_prompt and final_result_processed templates.""" def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None @@ -244,8 +245,8 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse FunctionModel(return_model), output_type=Foo, prompt_templates=PromptTemplates( - retry_prompt=lambda part, ctx: f'Custom retry message {ctx.run_id} {part.content}', - tool_final_result=lambda part, ctx: f'Custom final result {ctx.run_id} {part.content}', + retry_prompt=lambda part, ctx: f'Custom retry message {part.content}', + final_result_processed=lambda part, ctx: f'Custom final result {part.content}', ), ) @@ -258,8 +259,6 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert isinstance(retry_part, RetryPromptPart) # Verify the custom template includes run_id and content response = retry_part.model_response() - assert response.startswith('Custom retry message ') - assert result.run_id in response assert "[{'type': 'int_parsing'" in response assert result.all_messages() == snapshot( @@ -289,14 +288,14 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ], tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - retry_template="Custom retry message 630a3726-d848-4328-9824-035676e55100 [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", + retry_template="Custom retry message [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", ) ], run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())], - usage=RequestUsage(input_tokens=76, output_tokens=14), + usage=RequestUsage(input_tokens=75, output_tokens=14), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), @@ -305,9 +304,10 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse parts=[ ToolReturnPart( tool_name='final_result', - content='Custom final result', + content='Custom final result Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -482,6 +482,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -627,6 +628,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: 
content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -638,7 +640,11 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: ModelRequest( parts=[ ToolReturnPart( - tool_name='final_result', content='foobar', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='final_result', + content='foobar', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -652,6 +658,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1226,6 +1233,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1537,6 +1545,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1575,6 +1584,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -2205,7 +2215,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='ret_a', + content='a-apple', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2241,7 +2255,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='ret_a', + content='a-apple', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2306,7 +2324,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='ret_a', + content='a-apple', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2370,7 +2392,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='ret_a', + content='a-apple', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2395,6 +2421,7 @@ async def ret_a(x: str) -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -2422,7 +2449,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='ret_a', + content='a-apple', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2441,6 +2472,7 @@ async def ret_a(x: str) -> str: content='Final result processed.', 
tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ], run_id=IsStr(), @@ -2466,6 +2498,7 @@ async def ret_a(x: str) -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ], run_id=IsStr(), @@ -2591,6 +2624,7 @@ def test_tool() -> str: content='Test response', tool_call_id='call_123', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -3130,24 +3164,28 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='deferred_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ] ) @@ -3178,12 +3216,14 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='final_result', content='Output tool not used - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='output-tool-not-executed', ), ] ) @@ -3266,21 +3306,28 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='final_result', content='Output tool not used - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='output-tool-not-executed', ), ToolReturnPart( tool_name='regular_tool', content=42, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='another_tool', content=2, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='another_tool', + content=2, + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), RetryPromptPart( content="Unknown tool name: 'unknown_tool'. 
Available tools: 'final_result', 'regular_tool', 'another_tool', 'deferred_tool'", @@ -3293,6 +3340,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -3377,18 +3425,21 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), RetryPromptPart( content="Unknown tool name: 'unknown_tool'. Available tools: 'final_result', 'regular_tool', 'another_tool', 'deferred_tool'", @@ -3401,6 +3452,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -3459,6 +3511,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id='second', + return_kind='final-result-processed', ), ] ) @@ -3540,6 +3593,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -3852,6 +3906,7 @@ async def foobar(x: str) -> str: content='inner agent result', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -4170,6 +4225,7 @@ def get_image() -> BinaryContent: content='See file image_id_1', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -4218,6 +4274,7 @@ def get_files(): content=['See file img_001', 'See file vid_002', 'See file aud_003', 'See file doc_004'], tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -4445,6 +4502,7 @@ class Output(BaseModel): content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -4494,7 +4552,11 @@ def my_tool(x: int) -> int: ModelRequest( parts=[ ToolReturnPart( - tool_name='my_tool', content=2, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='my_tool', + content=2, + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -4512,7 +4574,11 @@ def my_tool(x: int) -> int: ModelRequest( parts=[ ToolReturnPart( - tool_name='my_tool', content=4, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='my_tool', + content=4, + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -4544,7 +4610,7 @@ def test_agent_run_result_serialization() -> None: def test_agent_repr() -> None: agent = Agent() assert repr(agent) == snapshot( - "Agent(model=None, name=None, end_strategy='early', model_settings=None, 
output_type=, instrument=None)" + "Agent(model=None, name=None, end_strategy='early', model_settings=None, prompt_templates=None,output_type=, instrument=None)" ) @@ -4584,6 +4650,7 @@ def foo_tool(foo: Foo) -> int: 'tool_call_id': IsStr(), 'timestamp': IsStr(), 'part_kind': 'retry-prompt', + 'retry_template': None, } ], 'instructions': None, @@ -4682,6 +4749,7 @@ def analyze_data() -> ToolReturn: tool_call_id=IsStr(), metadata={'foo': 'bar'}, timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -4763,6 +4831,7 @@ def analyze_data() -> ToolReturn: tool_call_id=IsStr(), metadata={'foo': 'bar'}, timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), ], run_id=IsStr(), @@ -5059,6 +5128,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='foo tool added', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -5077,6 +5147,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Hello from foo', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -5156,6 +5227,7 @@ async def only_if_plan_presented( content='a', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -5180,6 +5252,7 @@ async def only_if_plan_presented( content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -5554,6 +5627,7 @@ def create_file(path: str, content: str) -> str: content='File \'new_file.py\' created with content: print("Hello, world!")', tool_call_id='create_file', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -5612,6 +5686,7 @@ def create_file(path: str, content: str) -> str: content='File \'new_file.py\' created with content: print("Hello, world!")', tool_call_id='create_file', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -5623,12 +5698,14 @@ def create_file(path: str, content: str) -> str: content="File 'ok_to_delete.py' deleted", tool_call_id='ok_to_delete', timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='delete_file', content='File cannot be deleted', tool_call_id='never_delete', timestamp=IsDatetime(), + return_kind='tool-denied', ), ], run_id=IsStr(), @@ -5653,12 +5730,14 @@ def create_file(path: str, content: str) -> str: content="File 'ok_to_delete.py' deleted", tool_call_id='ok_to_delete', timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='delete_file', content='File cannot be deleted', tool_call_id='never_delete', timestamp=IsDatetime(), + return_kind='tool-denied', ), ], run_id=IsStr(), @@ -5843,6 +5922,7 @@ def update_file(ctx: RunContext, path: str, content: str) -> str: content="File '.env' updated", tool_call_id='update_file_1', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart(content='continue with the operation', timestamp=IsDatetime()), ], @@ -6255,6 +6335,7 @@ def roll_dice() -> int: content=4, tool_call_id='pyd_ai_tool_call_id__roll_dice', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -6279,6 +6360,7 @@ def roll_dice() -> int: content='Final result processed.', tool_call_id='pyd_ai_tool_call_id__final_result', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -6313,6 +6395,7 @@ def roll_dice() -> int: content=4, 
tool_call_id='pyd_ai_tool_call_id__roll_dice', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -6337,6 +6420,7 @@ def roll_dice() -> int: content='Final result processed.', tool_call_id='pyd_ai_tool_call_id__final_result', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), From edb115a20f6b4fbed07d4754f4d0dd5d384d6a97 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 11:04:37 +0530 Subject: [PATCH 13/98] adding docstring --- pydantic_ai_slim/pydantic_ai/messages.py | 10 +++++++++- pydantic_ai_slim/pydantic_ai/ui/_event_stream.py | 1 + pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 1 + pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py | 2 +- 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 9c787b73c5..44527989e7 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -881,7 +881,15 @@ class ToolReturnPart(BaseToolReturnPart): ] | None ) = None - """The kind of tool return such as final result processed, output tool not executed, etc. This is available on all parts as a discriminator.""" + """How the tool call was resolved, used for disambiguating return parts. + + * `tool-executed`: the tool ran successfully and produced a return value + * `final-result-processed`: an output tool produced the run's final result + * `output-tool-not-executed`: an output tool was skipped because a final result already existed + * `function-tool-not-executed`: a function tool was skipped due to early termination after a final result + * `tool-denied`: the tool call was rejected by an approval handler + + """ @dataclass(repr=False) diff --git a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py index 391cf06f2f..91a2980ef0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py @@ -170,6 +170,7 @@ async def transform_stream( # noqa: C901 tool_call_id=tool_call_id, tool_name=tool_name, content='Final result processed.', + return_kind='final-result-processed', ) ) async for e in self.handle_function_tool_result(output_tool_result_event): diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index fe3513ae58..f03c8ee89f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -142,6 +142,7 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: tool_name=tool_name, content=msg.content, tool_call_id=tool_call_id, + return_kind='tool-executed', ) ) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 7eee52c419..38c1ad803e 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -171,7 +171,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # if part.state == 'output-available': builder.add( - ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=part.output) + ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=part.output, return_kind='tool-executed') ) elif part.state == 'output-error': builder.add( From c1d77cf6c995955a55feefc5657fc7edb30ad11c Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 
11:06:32 +0530 Subject: [PATCH 14/98] lint --- pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 38c1ad803e..9955e33ced 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -171,7 +171,12 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # if part.state == 'output-available': builder.add( - ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=part.output, return_kind='tool-executed') + ToolReturnPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + content=part.output, + return_kind='tool-executed', + ) ) elif part.state == 'output-error': builder.add( From e8de0b3a0259cdef2ef55b1a914926383471309f Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 11:41:18 +0530 Subject: [PATCH 15/98] Fix tests + dbos and temporal implementation of runs with prompt_templates added --- .../pydantic_ai/agent/abstract.py | 2 + pydantic_ai_slim/pydantic_ai/agent/wrapper.py | 8 +++ .../pydantic_ai/durable_exec/dbos/_agent.py | 28 ++++++++ .../durable_exec/temporal/_agent.py | 26 ++++++++ tests/models/test_anthropic.py | 11 ++++ tests/models/test_bedrock.py | 3 + tests/models/test_cohere.py | 2 + tests/models/test_gemini.py | 23 ++++++- tests/models/test_google.py | 28 +++++++- tests/models/test_groq.py | 6 ++ tests/models/test_huggingface.py | 1 + tests/models/test_mistral.py | 11 ++++ tests/models/test_model_function.py | 33 ++++++++-- tests/models/test_model_test.py | 9 ++- tests/models/test_openai.py | 17 ++++- tests/models/test_openai_responses.py | 14 ++++ tests/test_a2a.py | 1 + tests/test_ag_ui.py | 2 + tests/test_agent.py | 8 +-- tests/test_dbos.py | 8 +++ tests/test_mcp.py | 14 ++++ tests/test_streaming.py | 66 +++++++++++++++++-- tests/test_tools.py | 16 +++++ tests/test_usage_limits.py | 1 + tests/test_vercel_ai.py | 4 ++ 25 files changed, 316 insertions(+), 26 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 07e32ef3cd..93bfc5a4e2 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -1093,6 +1093,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -1106,6 +1107,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. 
""" raise NotImplementedError yield diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index 38e832fa2b..58240f632c 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -79,6 +79,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -98,6 +99,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -117,6 +119,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -193,6 +196,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -211,6 +215,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -229,6 +234,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -242,6 +248,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + prompt_templates: The prompt templates to use instead of the prompt templates passed to the agent constructor and agent run. 
""" with self.wrapped.override( name=name, @@ -250,5 +257,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, + prompt_templates=prompt_templates, ): yield diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index ff6730f220..f977f9f697 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -135,6 +135,7 @@ async def wrapped_run_workflow( deps: AgentDepsT, model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -152,6 +153,7 @@ async def wrapped_run_workflow( instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -176,6 +178,7 @@ def wrapped_run_sync_workflow( model_settings: ModelSettings | None = None, instructions: Instructions[AgentDepsT] = None, usage_limits: _usage.UsageLimits | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -194,6 +197,7 @@ def wrapped_run_sync_workflow( deps=deps, model_settings=model_settings, usage_limits=usage_limits, + prompt_templates=prompt_templates, usage=usage, infer_name=infer_name, toolsets=toolsets, @@ -267,6 +271,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -287,6 +292,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -306,6 +312,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -342,6 +349,7 @@ async def main(): deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. usage_limits: Optional limits on model request count or token usage. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. 
@@ -364,6 +372,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -385,6 +394,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -405,6 +415,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -424,6 +435,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -458,6 +470,7 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -481,6 +494,7 @@ def run_sync( instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -502,6 +516,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -522,6 +537,7 @@ def run_stream( deps: AgentDepsT = None, instructions: Instructions[AgentDepsT] = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -621,6 +637,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -640,6 +657,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -658,6 +676,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage:
_usage.RunUsage | None = None, infer_name: bool = True, @@ -708,6 +727,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -735,6 +755,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -755,6 +776,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -775,6 +797,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -852,6 +875,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -876,6 +900,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -895,6 +920,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -908,6 +934,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent.
""" if _utils.is_set(model) and not isinstance(model, (DBOSModel)): raise UserError( @@ -921,5 +948,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, + prompt_templates=prompt_templates, ): yield diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 6e964c8d08..f84a05c612 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -266,6 +266,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -286,6 +287,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -305,6 +307,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -340,6 +343,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
@@ -365,6 +369,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -386,6 +391,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -425,6 +431,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -483,6 +490,7 @@ def run_sync( instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -504,6 +512,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -524,6 +533,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -544,6 +554,7 @@ async def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -576,6 +587,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set.
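
A note on the callable form these signatures accept: per `_apply_tool_template` earlier in the series, a string template is used verbatim while a callable receives the part being rewritten plus the run context. A minimal sketch (the `skipped_note` helper is illustrative, not part of the series):

    from pydantic_ai import RunContext
    from pydantic_ai.messages import PromptTemplates, ToolReturnPart

    def skipped_note(part: ToolReturnPart, ctx: RunContext) -> str:
        # Replaces the stock 'Tool not executed - a final result was already processed.'
        return f'{part.tool_name} was skipped because a final result already existed'

    templates = PromptTemplates(function_tool_not_executed=skipped_note)
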
@@ -601,6 +613,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -623,6 +636,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -642,6 +656,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -660,6 +675,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -710,6 +726,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -735,6 +752,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -754,6 +772,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -774,6 +793,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -794,6 +814,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -871,6 +892,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set.
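
Because `return_kind` (introduced in PATCH 12) makes the outcome of each tool call machine-readable, consumers can branch on it instead of matching the English content strings the old `apply_template` relied on. A minimal sketch of the kind of consumer this enables (the helper is illustrative):

    from pydantic_ai.messages import ModelMessage, ModelRequest, ToolReturnPart

    def skipped_tool_returns(messages: list[ModelMessage]) -> list[ToolReturnPart]:
        # Collect return parts for tool calls that never actually ran, without
        # parsing the human-readable content strings.
        skipped: list[ToolReturnPart] = []
        for message in messages:
            if isinstance(message, ModelRequest):
                for part in message.parts:
                    if isinstance(part, ToolReturnPart) and part.return_kind in (
                        'function-tool-not-executed',
                        'output-tool-not-executed',
                    ):
                        skipped.append(part)
        return skipped
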
@@ -905,6 +927,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -924,6 +947,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -937,6 +961,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. """ if workflow.in_workflow(): if _utils.is_set(model): @@ -959,5 +984,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, + prompt_templates=prompt_templates, ): yield diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index f770b157af..2c22d36860 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -921,6 +921,7 @@ async def test_request_structured_response(allow_model_requests: None): content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1019,6 +1020,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1135,6 +1137,7 @@ async def retrieve_entity_info(name: str) -> str: content="alice is bob's wife", tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', part_kind='tool-return', ), ToolReturnPart( @@ -1142,6 +1145,7 @@ async def retrieve_entity_info(name: str) -> str: content="bob is alice's husband", tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', part_kind='tool-return', ), ToolReturnPart( @@ -1149,6 +1153,7 @@ async def retrieve_entity_info(name: str) -> str: content="charlie is alice's son", tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', part_kind='tool-return', ), ToolReturnPart( @@ -1156,6 +1161,7 @@ async def retrieve_entity_info(name: str) -> str: content="daisy is bob's daughter and charlie's younger sister", tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', part_kind='tool-return', ), ] @@ -1407,6 +1413,7 @@ async def get_image() -> BinaryContent: content='See file 1c8566', tool_call_id='toolu_01WALUz3dC75yywrmL6dF3Bc', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -6224,6 +6231,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='toolu_01X9wcHKKAZD9tBC711xipPa', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -6261,6 +6269,7 @@ async def get_user_country() -> str: content='Final result processed.', tool_call_id='toolu_01LZABsgreMefH2Go8D5PQbW', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -6331,6 +6340,7 @@ async def get_user_country() -> 
str: content='Mexico', tool_call_id='toolu_01JJ8TequDsrEU2pv1QFRWAK', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -6422,6 +6432,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='toolu_01ArHq5f2wxRpRF2PVQcKExM', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index f13aaff4fb..d57b1884ae 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -332,6 +332,7 @@ async def temperature(city: str, date: datetime.date) -> str: content='30°C', tool_call_id='tooluse_5WEci1UmQ8ifMFkUcy2gHQ', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -362,6 +363,7 @@ async def temperature(city: str, date: datetime.date) -> str: content='Final result processed.', tool_call_id='tooluse_9AjloJSaQDKmpPFff-2Clg', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -628,6 +630,7 @@ async def get_temperature(city: str) -> str: content='30°C', tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent(index=0, part=TextPart(content='The')), diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index 07cb6ae9b9..a05e384a3d 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -224,6 +224,7 @@ async def test_request_structured_response(allow_model_requests: None): content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -343,6 +344,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index 0f2e51c0ce..b3672dde87 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -638,6 +638,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient): content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -726,12 +727,14 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='get_location', content='{"lat": 41, "lng": -74}', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ), ], run_id=IsStr(), @@ -908,10 +911,18 @@ async def bar(y: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='foo', content='a', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='foo', + content='a', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='bar', content='b', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='bar', + content='b', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ], run_id=IsStr(), @@ -931,6 +942,7 @@ async def bar(y: str) -> str: content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1003,6 +1015,7 @@ def get_location(loc_name: str) -> str: content='Tool not executed - a final result was already processed.', 
tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='function-tool-not-executed', ) ], run_id=IsStr(), @@ -1211,6 +1224,7 @@ async def get_image() -> BinaryContent: content='See file 1c8566', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -1718,6 +1732,7 @@ async def bar() -> str: content='hello', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1746,6 +1761,7 @@ async def bar() -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1800,6 +1816,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1828,6 +1845,7 @@ async def get_user_country() -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -2107,6 +2125,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/models/test_google.py b/tests/models/test_google.py index 3ef8cd5dda..0e2e2f69d2 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -223,7 +223,11 @@ async def temperature(city: str, date: datetime.date) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='temperature', content='30°C', tool_call_id=IsStr(), timestamp=IsDatetime() + tool_name='temperature', + content='30°C', + tool_call_id=IsStr(), + timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -256,6 +260,7 @@ async def temperature(city: str, date: datetime.date) -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -624,6 +629,7 @@ async def get_capital(country: str) -> str: content='Paris', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -748,7 +754,11 @@ async def get_temperature(city: str) -> str: IsInstance(FunctionToolCallEvent), FunctionToolResultEvent( result=ToolReturnPart( - tool_name='get_capital', content='Paris', tool_call_id=IsStr(), timestamp=IsDatetime() + tool_name='get_capital', + content='Paris', + tool_call_id=IsStr(), + timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent( @@ -766,7 +776,11 @@ async def get_temperature(city: str) -> str: IsInstance(FunctionToolCallEvent), FunctionToolResultEvent( result=ToolReturnPart( - tool_name='get_temperature', content='30°C', tool_call_id=IsStr(), timestamp=IsDatetime() + tool_name='get_temperature', + content='30°C', + tool_call_id=IsStr(), + timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent(index=0, part=TextPart(content='The temperature in Paris')), @@ -2442,6 +2456,7 @@ async def bar() -> str: content='hello', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2472,6 +2487,7 @@ async def bar() -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -2538,6 +2554,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2568,6 +2585,7 @@ async def get_user_country() 
-> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -2624,6 +2642,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2865,6 +2884,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -4125,6 +4145,7 @@ def get_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -4187,6 +4208,7 @@ def get_country() -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index dd3395750e..d984334091 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -260,6 +260,7 @@ async def test_request_structured_response(allow_model_requests: None): content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -382,6 +383,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -521,6 +523,7 @@ async def test_stream_structured(allow_model_requests: None): content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -627,6 +630,7 @@ async def get_image() -> BinaryContent: content='See file 1c8566', tool_call_id='call_wkpd', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -5381,6 +5385,7 @@ async def get_something_by_name(name: str) -> str: content='Something with name: nonexistent', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], instructions='Be concise. Never use pretty double quotes, just regular ones.', @@ -5507,6 +5512,7 @@ async def get_something_by_name(name: str) -> str: content='Something with name: test_name', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], instructions='Be concise. 
Never use pretty double quotes, just regular ones.', diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index 56d74ed619..24fc5f3ab3 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -413,6 +413,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 4ae21ad221..05f232e718 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -452,6 +452,7 @@ class CityLocation(BaseModel): content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -520,6 +521,7 @@ class CityLocation(BaseModel): content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -585,6 +587,7 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1168,6 +1171,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1325,6 +1329,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1353,6 +1358,7 @@ async def get_location(loc_name: str) -> str: content='Final result processed.', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1458,6 +1464,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1480,6 +1487,7 @@ async def get_location(loc_name: str) -> str: content='Final result processed.', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1572,6 +1580,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1716,6 +1725,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1918,6 +1928,7 @@ async def get_image() -> BinaryContent: content='See file 1c8566', tool_call_id='utZJMAZN4', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ diff --git a/tests/models/test_model_function.py b/tests/models/test_model_function.py index 196b140454..83b79533a2 100644 --- a/tests/models/test_model_function.py +++ b/tests/models/test_model_function.py @@ -187,6 +187,7 @@ def test_weather(): content='{"lat": 51, "lng": 0}', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -205,6 +206,7 @@ def test_weather(): content='Raining', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -257,6 +259,7 @@ def test_var_args(): 'metadata': None, 
'timestamp': IsStr() & IsNow(iso_string=True, tz=timezone.utc), # type: ignore[reportUnknownMemberType] 'part_kind': 'tool-return', + 'return_kind': 'tool-executed', } ) @@ -389,19 +392,39 @@ def test_call_all(): ModelRequest( parts=[ ToolReturnPart( - tool_name='foo', content='1', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='foo', + content='1', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='bar', content='2', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='bar', + content='2', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='baz', content='3', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='baz', + content='3', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='qux', content='4', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='qux', + content='4', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='quz', content='a', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='quz', + content='a', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ], run_id=IsStr(), diff --git a/tests/models/test_model_test.py b/tests/models/test_model_test.py index f7a6809a71..c6f0a30c76 100644 --- a/tests/models/test_model_test.py +++ b/tests/models/test_model_test.py @@ -100,6 +100,7 @@ def test_custom_output_args(): content='Final result processed.', tool_call_id='pyd_ai_tool_call_id__final_result', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -147,6 +148,7 @@ class Foo(BaseModel): content='Final result processed.', tool_call_id='pyd_ai_tool_call_id__final_result', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -190,6 +192,7 @@ def test_output_type(): content='Final result processed.', tool_call_id='pyd_ai_tool_call_id__final_result', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -248,7 +251,11 @@ async def my_ret(x: int) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='my_ret', content='1', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='my_ret', + content='1', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index b6c16b0f3e..074f63447c 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -263,6 +263,7 @@ async def test_request_structured_response(allow_model_requests: None): content='Final result processed.', tool_call_id='123', timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -395,6 +396,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id='2', timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -921,6 +923,7 @@ async def get_image() -> ImageUrl: content='See file bd38f5', tool_call_id='call_4hrT4QP9jfojtK69vGiFCFjG', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -1008,6 +1011,7 @@ async def get_image() -> BinaryContent: content='See file 1c8566', 
tool_call_id='call_Btn0GIzGr4ugNlLmkQghQUMY', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -2119,7 +2123,11 @@ async def get_temperature(city: str) -> float: ModelRequest( parts=[ ToolReturnPart( - tool_name='get_temperature', content=20.0, tool_call_id=IsStr(), timestamp=IsDatetime() + tool_name='get_temperature', + content=20.0, + tool_call_id=IsStr(), + timestamp=IsDatetime(), + return_kind='tool-executed', ) ], instructions='You are a helpful assistant.', @@ -2538,6 +2546,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2575,6 +2584,7 @@ async def get_user_country() -> str: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -2638,6 +2648,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_J1YabdC7G7kzEZNbbZopwenH', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2724,6 +2735,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_PkRGedQNRFUzJp2R7dO7avWR', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2812,6 +2824,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_SIttSeiOistt33Htj4oiHOOX', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2900,6 +2913,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_s7oT9jaLAsEqTgvxZTmFh0wB', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2988,6 +3002,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_wJD14IyJ4KKVtjCrGyNCHO09', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index fc72a48cb1..2400ac4ce7 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -287,6 +287,7 @@ async def get_location(loc_name: str) -> str: content='{"lat": 51, "lng": 0}', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ], run_id=IsStr(), @@ -363,6 +364,7 @@ async def get_image() -> BinaryContent: content='See file 1c8566', tool_call_id='call_FLm3B1f8QAan0KpbUXhNY8bA', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -1386,6 +1388,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_ZWkVhdUjupo528U9dqgFeRkH', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1415,6 +1418,7 @@ async def get_user_country() -> str: content='Final result processed.', tool_call_id='call_iFBd0zULhSZRR908DfH73VwN', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1475,6 +1479,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_aTJhYjzmixZaVGqwl5gn2Ncr', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1554,6 +1559,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_tTAThu8l2S9hNky2krdwijGP', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1635,6 +1641,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_UaLahjOtaM2tTyYZLxTCbOaP', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1712,6 +1719,7 @@ 
async def get_user_country() -> str: content='Mexico', tool_call_id='call_FrlL4M0CbAy8Dhv4VqF1Shom', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1793,6 +1801,7 @@ async def get_user_country() -> str: content='Mexico', tool_call_id='call_my4OyoVXRT0m7bLWmsxcaCQI', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2355,6 +2364,7 @@ def update_plan(plan: str) -> str: content='plan updated', tool_call_id='call_gL7JE6GDeGGsFubqO2XGytyO', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt", @@ -3620,6 +3630,7 @@ def get_meaning_of_life() -> int: content=42, tool_call_id='call_3WCunBU7lCG1HHaLmnnRJn8I', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -6059,6 +6070,7 @@ class Animal(BaseModel): content='Final result processed.', tool_call_id='call_eE7MHM5WMJnMt5srV69NmBJk', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -6271,6 +6283,7 @@ async def get_animal() -> str: content='axolotl', tool_call_id='call_t76xO1K2zqrJkawkU3tur8vj', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -6579,6 +6592,7 @@ class CityLocation(BaseModel): content='Final result processed.', tool_call_id='call_LIXPi261Xx3dGYzlDsOoyHGk', timestamp=IsDatetime(), + return_kind='final-result-processed', ) ], run_id=IsStr(), diff --git a/tests/test_a2a.py b/tests/test_a2a.py index 4e5f74f476..edf9e6045e 100644 --- a/tests/test_a2a.py +++ b/tests/test_a2a.py @@ -638,6 +638,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon content='Final result processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='final-result-processed', ), UserPromptPart(content='Second message', timestamp=IsDatetime()), ], diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 5cbf85fc69..ff091752e1 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1555,12 +1555,14 @@ async def test_messages() -> None: content='Tool message', tool_call_id='tool_call_1', timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='tool_call_2', content='Tool message', tool_call_id='tool_call_2', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content='User message', diff --git a/tests/test_agent.py b/tests/test_agent.py index 074f3e86c8..ee3b541306 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -5502,15 +5502,9 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon ) model = FunctionModel(model_function) - prompt_templates = PromptTemplates(validation_retry_prompt='HENLU') - agent = Agent(model, system_prompt='You are a helpful assistant.', prompt_templates=prompt_templates) + agent = Agent(model, system_prompt='You are a helpful assistant.') result = await agent.run('Hello') - retry_part = result.all_messages()[2].parts[0] - - response = retry_part.model_response() - - assert response == snapshot('HENLU') assert result.all_messages() == snapshot( [ diff --git a/tests/test_dbos.py b/tests/test_dbos.py index 1d3b9991db..930774d862 100644 --- a/tests/test_dbos.py +++ b/tests/test_dbos.py @@ -672,6 +672,7 @@ async def event_stream_handler( content='Mexico', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), 
FunctionToolResultEvent( @@ -680,6 +681,7 @@ async def event_stream_handler( content='Pydantic AI', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent( @@ -721,6 +723,7 @@ async def event_stream_handler( content='sunny', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent( @@ -1445,12 +1448,14 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques content=True, tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='create_file', content='Success', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ], instructions='Just call tools without asking for confirmation.', @@ -1576,12 +1581,14 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest content=True, tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='create_file', content='Success', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ], instructions='Just call tools without asking for confirmation.', @@ -1714,6 +1721,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO content='sunny', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/test_mcp.py b/tests/test_mcp.py index 221ad37548..c1357c49c8 100644 --- a/tests/test_mcp.py +++ b/tests/test_mcp.py @@ -260,6 +260,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent) content=32.0, tool_call_id='call_QssdxTGkPblTYHmyVES1tKBj', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -429,6 +430,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent): content='The weather in Mexico City is sunny and 26 degrees Celsius.', tool_call_id='call_m9goNwaHBbU926w47V7RtWPt', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -509,6 +511,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A content='Pydantic AI', tool_call_id='call_LaiWltzI39sdquflqeuF0EyE', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -585,6 +588,7 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age content='Pydantic AI\n', tool_call_id='call_qi5GtBeIEyT7Y3yJvVFIi062', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -663,6 +667,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent: content='See file 1c8566', tool_call_id='call_nFsDHYDZigO0rOHqmChZ3pmt', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()), ], @@ -748,6 +753,7 @@ async def test_tool_returning_image_resource_link( content='See file 1c8566', tool_call_id='call_eVFgn54V9Nuh8Y4zvuzkYjUp', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()), ], @@ -814,6 +820,7 @@ async def test_tool_returning_audio_resource( content='See file 2d36ae', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart(content=['This is file 2d36ae:', audio_content], timestamp=IsDatetime()), ], @@ -884,6 +891,7 @@ async def test_tool_returning_audio_resource_link( 
content='See file 2d36ae', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -963,6 +971,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im content='See file 1c8566', tool_call_id='call_Q7xG8CCG0dyevVfUS0ubsDdN', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ @@ -1040,6 +1049,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent): content={'foo': 'bar', 'baz': 123}, tool_call_id='call_oqKviITBj8PwpQjGyUu4Zu5x', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1114,6 +1124,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen content={'foo': 'bar', 'baz': 123}, tool_call_id='call_R0n2R7S9vL2aZOX25T9jahTd', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1229,6 +1240,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent): content='This is not an error', tool_call_id='call_4xGyvdghYKHN8x19KWkRtA5N', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1303,6 +1315,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent): content=[], tool_call_id='call_mJTuQ2Cl5SaHPTJbIILEUhJC', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1386,6 +1399,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent: ], tool_call_id='call_kL0TvjEVQBDGZrn1Zv7iNYOW', timestamp=IsDatetime(), + return_kind='tool-executed', ), UserPromptPart( content=[ diff --git a/tests/test_streaming.py b/tests/test_streaming.py index 0c6a46f3c0..ab808e47e4 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -86,7 +86,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='ret_a', + content='a-apple', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -122,7 +126,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='ret_a', + content='a-apple', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -178,7 +186,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='ret_a', + content='a-apple', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -224,7 +236,11 @@ async def ret_a(x: str) -> str: ModelRequest( parts=[ ToolReturnPart( - tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='ret_a', + content='a-apple', + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -605,6 +621,7 @@ async def ret_a(x: str) -> str: content='hello world', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -632,6 +649,7 @@ async def ret_a(x: str) -> str: content='hello world', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -656,6 +674,7 @@ async def 
ret_a(x: str) -> str: content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -809,18 +828,21 @@ def another_tool(y: int) -> int: # pragma: no cover content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -868,12 +890,14 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='final_result', content='Output tool not used - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='output-tool-not-executed', ), ], run_id=IsStr(), @@ -940,18 +964,28 @@ def another_tool(y: int) -> int: content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='final_result', content='Output tool not used - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='output-tool-not-executed', ), ToolReturnPart( - tool_name='regular_tool', content=42, timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='regular_tool', + content=42, + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='another_tool', content=2, timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='another_tool', + content=2, + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ), RetryPromptPart( content="Unknown tool name: 'unknown_tool'. 
Available tools: 'final_result', 'regular_tool', 'another_tool'", @@ -1053,6 +1087,7 @@ def another_tool(y: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='final-result-processed', part_kind='tool-return', ), ToolReturnPart( @@ -1060,6 +1095,7 @@ def another_tool(y: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='function-tool-not-executed', part_kind='tool-return', ), ToolReturnPart( @@ -1067,6 +1103,7 @@ def another_tool(y: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='function-tool-not-executed', part_kind='tool-return', ), RetryPromptPart( @@ -1173,12 +1210,14 @@ def regular_tool(x: int) -> int: # pragma: no cover content='Output tool not used - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='output-tool-not-executed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -1257,6 +1296,7 @@ def regular_tool(x: int) -> int: content=1, tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1305,7 +1345,11 @@ def regular_tool(x: int) -> int: ModelRequest( parts=[ ToolReturnPart( - tool_name='regular_tool', content=0, timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() + tool_name='regular_tool', + content=0, + timestamp=IsNow(tz=timezone.utc), + tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1325,6 +1369,7 @@ def regular_tool(x: int) -> int: content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1544,6 +1589,7 @@ def known_tool(x: int) -> int: content=10, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), ), ] @@ -1790,6 +1836,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int: content=84, tool_call_id='my_tool', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1973,6 +2020,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ), PartStartEvent(index=0, part=TextPart(content='')), @@ -2019,6 +2067,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ), PartStartEvent(index=0, part=TextPart(content='')), @@ -2068,6 +2117,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ), PartStartEvent(index=0, part=TextPart(content='')), @@ -2111,6 +2161,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen content='See file bd38f5', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), content=[ 'This is file bd38f5:', @@ 
-2160,6 +2211,7 @@ async def ret_a(x: str) -> str: content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ), PartStartEvent(index=0, part=TextPart(content='')), diff --git a/tests/test_tools.py b/tests/test_tools.py index bcdf537994..d740dcd801 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1380,6 +1380,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int: content=84, tool_call_id='my_tool', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1780,6 +1781,7 @@ def buy(fruit: str): tool_call_id='get_price_apple', metadata={'fruit': 'apple', 'price': 10.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='Unknown fruit: banana', @@ -1793,6 +1795,7 @@ def buy(fruit: str): tool_call_id='get_price_pear', metadata={'fruit': 'pear', 'price': 10.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='Unknown fruit: grape', @@ -1873,6 +1876,7 @@ def buy(fruit: str): tool_call_id='get_price_apple', metadata={'fruit': 'apple', 'price': 10.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='Unknown fruit: banana', @@ -1886,6 +1890,7 @@ def buy(fruit: str): tool_call_id='get_price_pear', metadata={'fruit': 'pear', 'price': 10.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='Unknown fruit: grape', @@ -1918,6 +1923,7 @@ def buy(fruit: str): tool_call_id='buy_banana', metadata={'fruit': 'banana', 'price': 100.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='The purchase of pears was denied.', @@ -1958,6 +1964,7 @@ def buy(fruit: str): tool_call_id='buy_banana', metadata={'fruit': 'banana', 'price': 100.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='The purchase of pears was denied.', @@ -2016,6 +2023,7 @@ def buy(fruit: str): tool_call_id='get_price_apple', metadata={'fruit': 'apple', 'price': 10.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='Unknown fruit: banana', @@ -2029,6 +2037,7 @@ def buy(fruit: str): tool_call_id='get_price_pear', metadata={'fruit': 'pear', 'price': 10.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='Unknown fruit: grape', @@ -2048,6 +2057,7 @@ def buy(fruit: str): tool_call_id='buy_banana', metadata={'fruit': 'banana', 'price': 100.0}, timestamp=IsDatetime(), + return_kind='tool-executed', ), RetryPromptPart( content='The purchase of pears was denied.', @@ -2168,6 +2178,7 @@ def bar(x: int) -> int: content=9, tool_call_id='bar', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2221,6 +2232,7 @@ def bar(x: int) -> int: content=9, tool_call_id='bar', timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -2232,12 +2244,14 @@ def bar(x: int) -> int: content=2, tool_call_id='foo1', timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='foo', content='The tool call was denied.', tool_call_id='foo2', timestamp=IsDatetime(), + return_kind='tool-denied', ), ], run_id=IsStr(), @@ -2295,6 +2309,7 @@ def test_deferred_tool_results_serializable(): 'tool_call_id': 'foo', 'timestamp': IsDatetime(), 'part_kind': 'retry-prompt', + 'retry_template': None, }, 'any': {'foo': 'bar'}, }, @@ -2439,6 +2454,7 @@ def always_fail(ctx: RunContext[None]) -> str: content='I guess you never learn', tool_call_id=IsStr(), 
timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/test_usage_limits.py b/tests/test_usage_limits.py index ac17fd0be5..c49cce3c97 100644 --- a/tests/test_usage_limits.py +++ b/tests/test_usage_limits.py @@ -123,6 +123,7 @@ async def ret_a(x: str) -> str: content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='tool-executed', ) ], run_id=IsStr(), diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 9ac6137e8c..7d4db6308e 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -236,6 +236,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): content="[Scrubbed due to 'API Key']", tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', timestamp=IsDatetime(), + return_kind='tool-executed', ) ] ), @@ -256,6 +257,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): content="[Scrubbed due to 'Auth']", tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', timestamp=IsDatetime(), + return_kind='tool-executed', ) ] ), @@ -1868,6 +1870,7 @@ async def test_adapter_load_messages(): content="[Scrubbed due to 'API Key']", tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', timestamp=IsDatetime(), + return_kind='tool-executed', ) ] ), @@ -1888,6 +1891,7 @@ async def test_adapter_load_messages(): content="[Scrubbed due to 'API Key']", tool_call_id='toolu_01XX3rjFfG77h3KCbVHoY', timestamp=IsDatetime(), + return_kind='tool-executed', ) ] ), From 086e03523077e11a90fdff736bd6dc3a31db1d79 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 11:41:24 +0530 Subject: [PATCH 16/98] Fix tests + dbos and temporal implementation of runs with prompt_templates added --- tests/test_temporal.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 98039d9078..56d47c1b4f 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -805,6 +805,7 @@ async def event_stream_handler( content='Mexico', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), FunctionToolResultEvent( @@ -813,6 +814,7 @@ async def event_stream_handler( content='Pydantic AI', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent( @@ -854,6 +856,7 @@ async def event_stream_handler( content='sunny', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv', timestamp=IsDatetime(), + return_kind='tool-executed', ) ), PartStartEvent( @@ -1868,12 +1871,14 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client: content=True, tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ToolReturnPart( tool_name='create_file', content='Success', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ), ], instructions='Just call tools without asking for confirmation.', @@ -2024,6 +2029,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien content='sunny', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='tool-executed', ) ], run_id=IsStr(), From f23b841431a6b25cd985b2ac56bcac1aa9b925d8 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 12:15:49 +0530 Subject: [PATCH 17/98] Fix tests --- .../durable_exec/temporal/_agent.py | 1 + tests/test_agent.py | 168 +++++++++++++++++- 2 files changed, 161 insertions(+), 8 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py 
b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index f84a05c612..cfc4cdcb0f 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -466,6 +466,7 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. diff --git a/tests/test_agent.py b/tests/test_agent.py index ee3b541306..33c292e015 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -230,16 +230,27 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert result.all_messages_json().startswith(b'[{"parts":[{"content":"Hello",') -def test_prompt_templates(): - """Test both retry_prompt and final_result_processed templates.""" +def test_prompt_templates_callable(): + """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" + + def my_function_tool() -> str: + return 'function executed' def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None + if len(messages) == 1: - args_json = '{"a": "wrong", "b": "foo"}' + return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, '{"a": "wrong", "b": "foo"}')]) + else: - args_json = '{"a": 42, "b": "foo"}' - return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, args_json)]) + assert info.function_tools is not None + return ModelResponse( + parts=[ + ToolCallPart(info.output_tools[0].name, '{"a": 42, "b": "foo"}'), # Succeeds + ToolCallPart(info.output_tools[0].name, '{"a": 99, "b": "bar"}'), # Not executed + ToolCallPart(info.function_tools[0].name, '{}'), # Not executed + ] + ) agent = Agent( FunctionModel(return_model), @@ -247,20 +258,25 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse prompt_templates=PromptTemplates( retry_prompt=lambda part, ctx: f'Custom retry message {part.content}', final_result_processed=lambda part, ctx: f'Custom final result {part.content}', + output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', + function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', ), ) + agent.tool_plain(my_function_tool) + result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} + # Verify retry_prompt was applied retry_request = result.all_messages()[2] assert isinstance(retry_request, ModelRequest) retry_part = retry_request.parts[0] assert isinstance(retry_part, RetryPromptPart) - # Verify the custom template includes run_id and content response = retry_part.model_response() assert "[{'type': 'int_parsing'" in response + # Full snapshot verification assert result.all_messages() == snapshot( [ ModelRequest( @@ -294,8 +310,12 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse run_id=IsStr(), ), ModelResponse( - parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())], -
usage=RequestUsage(input_tokens=75, output_tokens=14), + parts=[ + ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr()), + ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), + ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), + ], + usage=RequestUsage(input_tokens=75, output_tokens=23), # More tokens for 3 tool calls model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), @@ -308,10 +328,142 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='final-result-processed', + ), + ToolReturnPart( + tool_name='final_result', + content='Custom output not executed: final_result', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='output-tool-not-executed', + ), + ToolReturnPart( + tool_name='my_function_tool', + content='Custom function not executed: my_function_tool', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', + ), + ], + run_id=IsStr(), + ), + ] + ) + + +def test_prompt_templates_string(): + """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" + + def my_function_tool() -> str: + return 'function executed' + + def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + assert info.output_tools is not None + + if len(messages) == 1: + return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, '{"a": "wrong", "b": "foo"}')]) + + else: + assert info.function_tools is not None + return ModelResponse( + parts=[ + ToolCallPart(info.output_tools[0].name, '{"a": 42, "b": "foo"}'), # Succeeds + ToolCallPart(info.output_tools[0].name, '{"a": 99, "b": "bar"}'), # Not executed + ToolCallPart(info.function_tools[0].name, '{}'), # Not executed + ] + ) + + agent = Agent( + FunctionModel(return_model), + output_type=Foo, + prompt_templates=PromptTemplates( + retry_prompt='Custom retry message', + final_result_processed='Custom final result', + output_tool_not_executed='Custom output not executed:', + function_tool_not_executed='Custom function not executed', + ), + ) + + agent.tool_plain(my_function_tool) + + result = agent.run_sync('Hello') + assert result.output.model_dump() == {'a': 42, 'b': 'foo'} + + # Verify retry_prompt was applied + retry_request = result.all_messages()[2] + assert isinstance(retry_request, ModelRequest) + retry_part = retry_request.parts[0] + assert isinstance(retry_part, RetryPromptPart) + _retry_response = retry_part.model_response() + # Full snapshot verification + assert result.all_messages() == snapshot( + [ + ModelRequest( + parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + run_id=IsStr(), + ), + ModelResponse( + parts=[ToolCallPart(tool_name='final_result', args='{"a": "wrong", "b": "foo"}', tool_call_id=IsStr())], + usage=RequestUsage(input_tokens=51, output_tokens=7), + model_name='function:return_model:', + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + RetryPromptPart( + tool_name='final_result', + content=[ + { + 'type': 'int_parsing', + 'loc': ('a',), + 'msg': 'Input should be a valid integer, unable to parse string as an integer', + 'input': 'wrong', + } + ], + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + retry_template='Custom retry message', ) ], run_id=IsStr(), ), + 
ModelResponse( + parts=[ + ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr()), + ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), + ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), + ], + usage=RequestUsage(input_tokens=54, output_tokens=23), # More tokens for 3 tool calls + model_name='function:return_model:', + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='final_result', + content='Custom final result', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', + ), + ToolReturnPart( + tool_name='final_result', + content='Custom output not executed:', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='output-tool-not-executed', + ), + ToolReturnPart( + tool_name='my_function_tool', + content='Custom function not executed', + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', + ), + ], + run_id=IsStr(), + ), ] ) From acc5420a440b117b00eb86f61923da134da49c11 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 12:18:32 +0530 Subject: [PATCH 18/98] useless diff --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 9d759df454..c9344600cc 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -923,13 +923,13 @@ async def process_tool_calls( # noqa: C901 calls_to_run: list[_messages.ToolCallPart] = [] if final_result and ctx.deps.end_strategy == 'early': for call in tool_calls_by_kind['function']: - part = _messages.ToolReturnPart( + output_parts.append(_messages.ToolReturnPart( tool_name=call.tool_name, content='Tool not executed - a final result was already processed.', - tool_call_id=call.tool_call_id, - return_kind='function-tool-not-executed', + tool_call_id=call.tool_call_id, + return_kind='function-tool-not-executed', + ) ) - output_parts.append(part) else: calls_to_run.extend(tool_calls_by_kind['function']) From a34c39121f1c14f7ccd1f89656168831918c18d7 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 12:19:05 +0530 Subject: [PATCH 19/98] useless diff --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index c9344600cc..20cc9af67d 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -981,13 +981,13 @@ async def process_tool_calls( # noqa: C901 # we shouldn't insert return parts as the deferred tools will still get a real result. 
if not isinstance(final_result.output, _output.DeferredToolRequests): for call in calls: - part = _messages.ToolReturnPart( + output_parts.append(_messages.ToolReturnPart( tool_name=call.tool_name, content='Tool not executed - a final result was already processed.', tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) - output_parts.append(part) + ) elif calls: deferred_calls['external'].extend(tool_calls_by_kind['external']) deferred_calls['unapproved'].extend(tool_calls_by_kind['unapproved']) From 5920092fa524558a9e3b0b0bcb1c6e7986fbe708 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 12:49:16 +0530 Subject: [PATCH 20/98] lint --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 20cc9af67d..19fd751aca 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -923,9 +923,10 @@ async def process_tool_calls( # noqa: C901 calls_to_run: list[_messages.ToolCallPart] = [] if final_result and ctx.deps.end_strategy == 'early': for call in tool_calls_by_kind['function']: - output_parts.append(_messages.ToolReturnPart( - tool_name=call.tool_name, - content='Tool not executed - a final result was already processed.', + output_parts.append( + _messages.ToolReturnPart( + tool_name=call.tool_name, + content='Tool not executed - a final result was already processed.', tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) @@ -981,13 +982,14 @@ async def process_tool_calls( # noqa: C901 # we shouldn't insert return parts as the deferred tools will still get a real result. if not isinstance(final_result.output, _output.DeferredToolRequests): for call in calls: - output_parts.append(_messages.ToolReturnPart( - tool_name=call.tool_name, - content='Tool not executed - a final result was already processed.', - tool_call_id=call.tool_call_id, - return_kind='function-tool-not-executed', + output_parts.append( + _messages.ToolReturnPart( + tool_name=call.tool_name, + content='Tool not executed - a final result was already processed.', + tool_call_id=call.tool_call_id, + return_kind='function-tool-not-executed', + ) ) - ) elif calls: deferred_calls['external'].extend(tool_calls_by_kind['external']) deferred_calls['unapproved'].extend(tool_calls_by_kind['unapproved']) From 4ac181f62044e2a489441eaa5b5cf0a62f64d422 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 13:00:25 +0530 Subject: [PATCH 21/98] fix prefect --- .../durable_exec/prefect/_agent.py | 29 ++++++++++++++++++- pydantic_ai_slim/pydantic_ai/messages.py | 5 +++- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 60c8122686..a310250e57 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -184,6 +184,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -204,6 +205,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: 
ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -223,6 +225,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -258,6 +261,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -284,6 +288,7 @@ async def wrapped_run_flow() -> AgentRunResult[Any]: instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -308,6 +313,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -328,6 +334,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -347,6 +354,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -381,6 +389,7 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
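The hunks above and below mechanically thread the new keyword through every `run`/`run_sync`/`run_stream` overload of the Prefect wrapper. As a reading aid, here is a minimal, hedged usage sketch of the feature this series builds: it assumes `PromptTemplates` is importable from `pydantic_ai.messages` (where this series defines it) and uses `TestModel` only so the snippet runs without a real model; neither detail is asserted by this patch.

from pydantic_ai import Agent
from pydantic_ai.messages import PromptTemplates  # assumed import path: the class is defined in messages.py in this series
from pydantic_ai.models.test import TestModel  # stub model so the sketch needs no API key

agent = Agent(TestModel(), system_prompt='You are a helpful assistant.')

# Each field accepts a plain string or a (part, run_context) callable, mirroring
# test_prompt_templates_string and test_prompt_templates_callable earlier in the series.
templates = PromptTemplates(
    retry_prompt='Validation failed - fix the errors and try again.',
    final_result_processed=lambda part, ctx: f'Recorded final result from {part.tool_name}.',
)

# Per-run override: the overloads in this file simply forward the keyword to the wrapped agent.
result = agent.run_sync('Hello', prompt_templates=templates)

String templates replace the default wording verbatim (and are recorded on the part, e.g. `retry_template` in the snapshots above), while callables can inspect the part and run context, which is why the tests exercise both forms.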
@@ -409,6 +418,7 @@ def wrapped_run_sync_flow() -> AgentRunResult[Any]: instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -434,6 +444,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -454,6 +465,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -474,6 +486,7 @@ async def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -506,6 +519,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -531,6 +545,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -553,6 +568,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -572,6 +588,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -590,6 +607,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -640,6 +658,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -665,6 +684,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -684,6 +704,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -703,6 +724,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -722,6 +744,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -798,6 +821,7 @@ async def main(): deps: Optional dependencies to use for this run. instructions: Optional additional instructions to use for this run. model_settings: Optional settings to use for this model's request. + prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -822,6 +846,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, + prompt_templates=prompt_templates, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -839,6 +864,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, + prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions. @@ -852,6 +878,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. + prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. 
""" if _utils.is_set(model) and not isinstance(model, PrefectModel): raise UserError( @@ -859,6 +886,6 @@ def override( ) with super().override( - name=name, deps=deps, model=model, toolsets=toolsets, tools=tools, instructions=instructions + name=name, deps=deps, model=model, toolsets=toolsets, tools=tools, instructions=instructions, prompt_templates=prompt_templates ): yield diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index e1d867901f..35308678f5 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -2007,7 +2007,10 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru message.retry_template = self.retry_prompt(message, ctx) def _apply_tool_template( - self, message: ToolReturnPart, ctx: _RunContext[Any], template: str | Callable[[Any, _RunContext[Any]], str] + self, + message: ToolReturnPart, + ctx: _RunContext[Any], + template: str | Callable[[ToolReturnPart, _RunContext[Any]], str], ): if isinstance(template, str): message.content = template From ef8cc54035b2cb998579a52415af71dce161e493 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 13:04:36 +0530 Subject: [PATCH 22/98] lint --- .../pydantic_ai/durable_exec/prefect/_agent.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index a310250e57..3d3542d025 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -886,6 +886,12 @@ def override( ) with super().override( - name=name, deps=deps, model=model, toolsets=toolsets, tools=tools, instructions=instructions, prompt_templates=prompt_templates + name=name, + deps=deps, + model=model, + toolsets=toolsets, + tools=tools, + instructions=instructions, + prompt_templates=prompt_templates, ): yield From 874d70ec9ae9d4a4bb43238cbcc4f9c1a64b86c1 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 13:14:05 +0530 Subject: [PATCH 23/98] fix --- .../pydantic_ai/durable_exec/temporal/_agent.py | 1 + pydantic_ai_slim/pydantic_ai/messages.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index e584ef6081..75e4471d34 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -414,6 +414,7 @@ def run_sync( deps: AgentDepsT = None, model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, + prompt_templates: _messages.PromptTemplates | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 35308678f5..6b46bd5b3a 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1968,25 +1968,25 @@ class PromptTemplates: retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None """Message sent to the model after validation failures or invalid responses. - + Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." 
""" final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Confirmation message sent when a final result is successfully processed. - + Default: "Final result processed." """ output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Message sent when an output tool call is skipped because a result was already found. - + Default: "Output tool not used - a final result was already processed." """ function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Message sent when a function tool call is skipped because a result was already found. - + Default: "Tool not executed - a final result was already processed." """ From c4ef9baf9f1dbef722e10c5094ba3cc34a3dd0a9 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 13:37:57 +0530 Subject: [PATCH 24/98] rolling back vercel adapter return kind --- pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py | 1 - tests/test_vercel_ai.py | 4 ---- 2 files changed, 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 3dd991e467..0e10951769 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -207,7 +207,6 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # tool_name=tool_name, tool_call_id=tool_call_id, content=part.output, - return_kind='tool-executed', ) ) elif part.state == 'output-error': diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 40de75310e..917bb51912 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -236,7 +236,6 @@ async def test_run(allow_model_requests: None, openai_api_key: str): content="[Scrubbed due to 'API Key']", tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', timestamp=IsDatetime(), - return_kind='tool-executed', ) ] ), @@ -257,7 +256,6 @@ async def test_run(allow_model_requests: None, openai_api_key: str): content="[Scrubbed due to 'Auth']", tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', timestamp=IsDatetime(), - return_kind='tool-executed', ) ] ), @@ -1870,7 +1868,6 @@ async def test_adapter_load_messages(): content="[Scrubbed due to 'API Key']", tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', timestamp=IsDatetime(), - return_kind='tool-executed', ) ] ), @@ -1891,7 +1888,6 @@ async def test_adapter_load_messages(): content="[Scrubbed due to 'API Key']", tool_call_id='toolu_01XX3rjFfG77h3KCbVHoY', timestamp=IsDatetime(), - return_kind='tool-executed', ) ] ), From 71608af6c5618aa8a36d8001ebcaa2deb61227e9 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 13:48:35 +0530 Subject: [PATCH 25/98] fix test --- tests/test_temporal.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 56d47c1b4f..7f609004a8 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -449,7 +449,7 @@ async def test_complex_agent_run_in_workflow( BasicSpan(content='ctx.run_step=1'), BasicSpan( content=IsStr( - regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"content":null,"event_kind":"function_tool_result"}' + 
regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return"(,"return_kind":"tool-executed")?},"content":null,"event_kind":"function_tool_result"}' ) ), ], @@ -478,7 +478,7 @@ async def test_complex_agent_run_in_workflow( BasicSpan(content='ctx.run_step=1'), BasicSpan( content=IsStr( - regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"content":null,"event_kind":"function_tool_result"}' + regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return"(,"return_kind":"tool-executed")?},"content":null,"event_kind":"function_tool_result"}' ) ), ], @@ -572,7 +572,7 @@ async def test_complex_agent_run_in_workflow( BasicSpan(content='ctx.run_step=2'), BasicSpan( content=IsStr( - regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"content":null,"event_kind":"function_tool_result"}' + regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return"(,"return_kind":"tool-executed")?},"content":null,"event_kind":"function_tool_result"}' ) ), ], From e3681750e1912c0985d15738caa5c67f8d948595 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 13:52:45 +0530 Subject: [PATCH 26/98] RunContext type --- pydantic_ai_slim/pydantic_ai/messages.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 6b46bd5b3a..1f4c0e50e7 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1966,31 +1966,31 @@ class PromptTemplates: Each template can be a static string or a callable that receives context and returns a string. """ - retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None + retry_prompt: str | Callable[[RetryPromptPart, _RunContext], str] | None = None """Message sent to the model after validation failures or invalid responses. Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." """ - final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + final_result_processed: str | Callable[[ToolReturnPart, _RunContext], str] | None = None """Confirmation message sent when a final result is successfully processed. Default: "Final result processed." """ - output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext], str] | None = None """Message sent when an output tool call is skipped because a result was already found. Default: "Output tool not used - a final result was already processed." """ - function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext], str] | None = None """Message sent when a function tool call is skipped because a result was already found. Default: "Tool not executed - a final result was already processed." 
""" - def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): + def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext): if isinstance(message, ToolReturnPart): if message.return_kind == 'final-result-processed' and self.final_result_processed: self._apply_tool_template(message, ctx, self.final_result_processed) From e7fc0c93f72f56c4aa56e58ad2dc4a06368a0f09 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 14:01:17 +0530 Subject: [PATCH 27/98] RunContext type --- pydantic_ai_slim/pydantic_ai/messages.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 1f4c0e50e7..6b46bd5b3a 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1966,31 +1966,31 @@ class PromptTemplates: Each template can be a static string or a callable that receives context and returns a string. """ - retry_prompt: str | Callable[[RetryPromptPart, _RunContext], str] | None = None + retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None """Message sent to the model after validation failures or invalid responses. Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." """ - final_result_processed: str | Callable[[ToolReturnPart, _RunContext], str] | None = None + final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Confirmation message sent when a final result is successfully processed. Default: "Final result processed." """ - output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext], str] | None = None + output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Message sent when an output tool call is skipped because a result was already found. Default: "Output tool not used - a final result was already processed." """ - function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext], str] | None = None + function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Message sent when a function tool call is skipped because a result was already found. Default: "Tool not executed - a final result was already processed." 
""" - def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext): + def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): if isinstance(message, ToolReturnPart): if message.return_kind == 'final-result-processed' and self.final_result_processed: self._apply_tool_template(message, ctx, self.final_result_processed) From eefe430df9d39f20ee33854265508886a7e98356 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 14:26:13 +0530 Subject: [PATCH 28/98] fix test --- docs/deferred-tools.md | 5 +++++ docs/testing.md | 1 + docs/tools.md | 2 ++ 3 files changed, 8 insertions(+) diff --git a/docs/deferred-tools.md b/docs/deferred-tools.md index 31e14149c0..97c3b3e37d 100644 --- a/docs/deferred-tools.md +++ b/docs/deferred-tools.md @@ -150,6 +150,7 @@ print(result.all_messages()) content="File 'README.md' updated: 'Hello, world!'", tool_call_id='update_file_readme', timestamp=datetime.datetime(...), + return_kind='tool-executed', ) ], run_id='...', @@ -161,12 +162,14 @@ print(result.all_messages()) content="File '.env' updated: ''", tool_call_id='update_file_dotenv', timestamp=datetime.datetime(...), + return_kind='tool-executed', ), ToolReturnPart( tool_name='delete_file', content='Deleting files is not allowed', tool_call_id='delete_file', timestamp=datetime.datetime(...), + return_kind='tool-denied', ), UserPromptPart( content='Now create a backup of README.md', @@ -195,6 +198,7 @@ print(result.all_messages()) content="File 'README.md.bak' updated: 'Hello, world!'", tool_call_id='update_file_backup', timestamp=datetime.datetime(...), + return_kind='tool-executed', ) ], run_id='...', @@ -348,6 +352,7 @@ async def main(): content=42, tool_call_id='pyd_ai_tool_call_id', timestamp=datetime.datetime(...), + return_kind='tool-executed', ) ], run_id='...', diff --git a/docs/testing.md b/docs/testing.md index 3089585ab0..99d2e01472 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -156,6 +156,7 @@ async def test_forecast(): content='Sunny with a chance of rain', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), ], run_id=IsStr(), diff --git a/docs/tools.md b/docs/tools.md index 40dcf5c810..38574ff819 100644 --- a/docs/tools.md +++ b/docs/tools.md @@ -108,6 +108,7 @@ print(dice_result.all_messages()) content='4', tool_call_id='pyd_ai_tool_call_id', timestamp=datetime.datetime(...), + return_kind='tool-executed', ) ], run_id='...', @@ -130,6 +131,7 @@ print(dice_result.all_messages()) content='Anne', tool_call_id='pyd_ai_tool_call_id', timestamp=datetime.datetime(...), + return_kind='tool-executed', ) ], run_id='...', From d2d049893de5f9a70fe00d54c39107e65cec651e Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 14:38:38 +0530 Subject: [PATCH 29/98] fix test --- tests/test_dbos.py | 6 +++--- tests/test_prefect.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_dbos.py b/tests/test_dbos.py index 930774d862..196a616b68 100644 --- a/tests/test_dbos.py +++ b/tests/test_dbos.py @@ -371,7 +371,7 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D BasicSpan(content='ctx.run_step=1'), BasicSpan( content=IsStr( - regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"content":null,"event_kind":"function_tool_result"}' + 
regex=r'{"result":{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","metadata":null,"timestamp":".+?","part_kind":"tool-return","return_kind":"tool-executed"},"content":null,"event_kind":"function_tool_result"}' ) ), ], @@ -386,7 +386,7 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D BasicSpan(content='ctx.run_step=1'), BasicSpan( content=IsStr( - regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"content":null,"event_kind":"function_tool_result"}' + regex=r'{"result":{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","metadata":null,"timestamp":".+?","part_kind":"tool-return","return_kind":"tool-executed"},"content":null,"event_kind":"function_tool_result"}' ) ), ], @@ -450,7 +450,7 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D BasicSpan(content='ctx.run_step=2'), BasicSpan( content=IsStr( - regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return"},"content":null,"event_kind":"function_tool_result"}' + regex=r'{"result":{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","metadata":null,"timestamp":".+?","part_kind":"tool-return","return_kind":"tool-executed"},"content":null,"event_kind":"function_tool_result"}' ) ), ], diff --git a/tests/test_prefect.py b/tests/test_prefect.py index b1c18b9803..715c80f251 100644 --- a/tests/test_prefect.py +++ b/tests/test_prefect.py @@ -305,7 +305,7 @@ async def run_complex_agent() -> Response: BasicSpan(content='ctx.run_step=1'), BasicSpan( content=IsStr( - regex=r'\{"result":\{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_rI3WKPYvVwlOgCGRjsPP2hEx","metadata":null,"timestamp":"[^"]+","part_kind":"tool-return"\},"content":null,"event_kind":"function_tool_result"\}' + regex=r'\{"result":\{"tool_name":"get_country","content":"Mexico","tool_call_id":"call_rI3WKPYvVwlOgCGRjsPP2hEx","metadata":null,"timestamp":"[^"]+","part_kind":"tool-return","return_kind":"tool-executed"\},"content":null,"event_kind":"function_tool_result"\}' ) ), ], @@ -389,7 +389,7 @@ async def run_complex_agent() -> Response: BasicSpan(content='ctx.run_step=2'), BasicSpan( content=IsStr( - regex=r'\{"result":\{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","metadata":null,"timestamp":"[^"]+","part_kind":"tool-return"\},"content":null,"event_kind":"function_tool_result"\}' + regex=r'\{"result":\{"tool_name":"get_weather","content":"sunny","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","metadata":null,"timestamp":"[^"]+","part_kind":"tool-return","return_kind":"tool-executed"\},"content":null,"event_kind":"function_tool_result"\}' ) ), ], @@ -406,7 +406,7 @@ async def run_complex_agent() -> Response: BasicSpan(content='ctx.run_step=2'), BasicSpan( content=IsStr( - regex=r'\{"result":\{"tool_name":"get_product_name","content":"Pydantic AI","tool_call_id":"call_SkGkkGDvHQEEk0CGbnAh2AQw","metadata":null,"timestamp":"[^"]+","part_kind":"tool-return"\},"content":null,"event_kind":"function_tool_result"\}' + regex=r'\{"result":\{"tool_name":"get_product_name","content":"Pydantic 
AI","tool_call_id":"call_SkGkkGDvHQEEk0CGbnAh2AQw","metadata":null,"timestamp":"[^"]+","part_kind":"tool-return","return_kind":"tool-executed"\},"content":null,"event_kind":"function_tool_result"\}' ) ), ], From d4a0c2d358d6221565a7b14aca1db453795ba590 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 15:07:41 +0530 Subject: [PATCH 30/98] fix test + coverage --- .../pydantic_ai/agent/__init__.py | 15 ++++- pydantic_ai_slim/pydantic_ai/messages.py | 11 ++-- tests/test_agent.py | 66 ++++--------------- 3 files changed, 32 insertions(+), 60 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 5215b9c0dd..ceb52734a0 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -604,7 +604,7 @@ async def main(): merged_settings = merge_model_settings(model_used.settings, self.model_settings) model_settings = merge_model_settings(merged_settings, model_settings) usage_limits = usage_limits or _usage.UsageLimits() - prompt_templates = prompt_templates or self.prompt_templates + prompt_templates = self._get_prompt_templates(prompt_templates) instructions_literal, instructions_functions = self._get_instructions(additional_instructions=instructions) @@ -1350,6 +1350,19 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: else: return deps + def _get_prompt_templates( + self, prompt_templates: _messages.PromptTemplates | None + ) -> _messages.PromptTemplates | None: + """Get prompt_templates for a run. + + If we've overridden prompt_templates via `_override_prompt_templates`, use that, + otherwise use the prompt_templates passed to the call, falling back to the agent default. + """ + if some_prompt_templates := self._override_prompt_templates.get(): + return some_prompt_templates.value + else: + return prompt_templates or self.prompt_templates + def _normalize_instructions( self, instructions: Instructions[AgentDepsT], diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 6b46bd5b3a..787d49ee30 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1999,12 +1999,11 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru elif message.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed: self._apply_tool_template(message, ctx, self.function_tool_not_executed) - elif isinstance(message, RetryPromptPart): - if self.retry_prompt: - if isinstance(self.retry_prompt, str): - message.retry_template = self.retry_prompt - else: - message.retry_template = self.retry_prompt(message, ctx) + elif isinstance(message, RetryPromptPart) and self.retry_prompt: + if isinstance(self.retry_prompt, str): + message.retry_template = self.retry_prompt + else: + message.retry_template = self.retry_prompt(message, ctx) def _apply_tool_template( self, diff --git a/tests/test_agent.py b/tests/test_agent.py index 9ded256e6a..3e9c04ab27 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -238,7 +238,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse def test_prompt_templates_callable(): """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" - def my_function_tool() -> str: + def my_function_tool() -> str: # pragma: no cover return 'function executed' def return_model(messages: list[ModelMessage], info: AgentInfo) -> 
ModelResponse: @@ -318,7 +318,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse parts=[ ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr()), ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), - ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), + ToolCallPart(tool_name='regular_tool', args='{}', tool_call_id=IsStr()), ], usage=RequestUsage(input_tokens=75, output_tokens=23), # More tokens for 3 tool calls model_name='function:return_model:', @@ -358,7 +358,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse def test_prompt_templates_string(): """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" - def my_function_tool() -> str: + def my_function_tool() -> str: # pragma: no cover return 'function executed' def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: @@ -472,6 +472,16 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ] ) + # Test override - verify prompt_templates can be overridden + with agent.override(prompt_templates=PromptTemplates(retry_prompt='Custom retry message override')): + result = agent.run_sync('Hello') + assert result.output.model_dump() == {'a': 42, 'b': 'foo'} + retry_request = result.all_messages()[2] + assert isinstance(retry_request, ModelRequest) + retry_part = retry_request.parts[0] + assert isinstance(retry_part, RetryPromptPart) + assert retry_part.model_response() == 'Custom retry message override' + def test_result_pydantic_model_validation_error(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: @@ -529,56 +539,6 @@ def check_b(cls, v: str) -> str: Fix the errors and try again.""") - -def test_result_pydantic_model_validation_error_prompt_templates(): - def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: - assert info.output_tools is not None - if len(messages) == 1: - args_json = '{"a": 1, "b": "foo"}' - else: - args_json = '{"a": 1, "b": "bar"}' - return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, args_json)]) - - class Bar(BaseModel): - a: int - b: str - - @field_validator('b') - def check_b(cls, v: str) -> str: - if v == 'foo': - raise ValueError('must not be foo') - return v - - prompt_templates = PromptTemplates(retry_prompt=lambda msg, _: f'IMPORTANT: {msg.content}') - - agent = Agent(FunctionModel(return_model), output_type=Bar, prompt_templates=prompt_templates) - - print('\nAgent prompt templates', agent.prompt_templates) - - result = agent.run_sync('Hello') - assert isinstance(result.output, Bar) - assert result.output.model_dump() == snapshot({'a': 1, 'b': 'bar'}) - messages_part_kinds = [(m.kind, [p.part_kind for p in m.parts]) for m in result.all_messages()] - assert messages_part_kinds == snapshot( - [ - ('request', ['user-prompt']), - ('response', ['tool-call']), - ('request', ['retry-prompt']), - ('response', ['tool-call']), - ('request', ['tool-return']), - ] - ) - - user_retry = result.all_messages()[2] - assert isinstance(user_retry, ModelRequest) - retry_prompt = user_retry.parts[0] - assert isinstance(retry_prompt, RetryPromptPart) - print('\n Retry Prompt ', retry_prompt) - assert retry_prompt.model_response() == snapshot( - "IMPORTANT: [{'type': 'value_error', 'loc': ('b',), 'msg': 'Value error, must not be foo', 'input': 'foo'}]" - ) - - def 
test_output_validator(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None From 2e8a1f22a99e36815336f51b532e8921ae166644 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 15:18:10 +0530 Subject: [PATCH 31/98] fix lint --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- tests/test_agent.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index ceb52734a0..4cf5dcfe2d 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1355,7 +1355,7 @@ def _get_prompt_templates( ) -> _messages.PromptTemplates | None: """Get prompt_templates for a run. - If we've overridden prompt_templates via `_override_prompt_templates`, use that, + If we've overridden prompt_templates via `_override_prompt_templates`, use that, otherwise use the prompt_templates passed to the call, falling back to the agent default. """ if some_prompt_templates := self._override_prompt_templates.get(): diff --git a/tests/test_agent.py b/tests/test_agent.py index 3e9c04ab27..d6106ecbd0 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -238,7 +238,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse def test_prompt_templates_callable(): """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" - def my_function_tool() -> str: # pragma: no cover + def my_function_tool() -> str: # pragma: no cover return 'function executed' def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: @@ -358,7 +358,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse def test_prompt_templates_string(): """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" - def my_function_tool() -> str: # pragma: no cover + def my_function_tool() -> str: # pragma: no cover return 'function executed' def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: @@ -539,6 +539,7 @@ def check_b(cls, v: str) -> str: Fix the errors and try again.""") + def test_output_validator(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None From f8b502674a9aa4a7ce9d1bee5497b083ea96709e Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 15:24:42 +0530 Subject: [PATCH 32/98] fix test --- tests/test_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index d6106ecbd0..efa5dd0b25 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -318,7 +318,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse parts=[ ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr()), ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), - ToolCallPart(tool_name='regular_tool', args='{}', tool_call_id=IsStr()), + ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), ], usage=RequestUsage(input_tokens=75, output_tokens=23), # More tokens for 3 tool calls model_name='function:return_model:', From b3632b791906ff75eeeb1c8dc74329935609723a Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 16:59:37 +0530 
Subject: [PATCH 33/98] fix test --- tests/test_agent.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index efa5dd0b25..b3e247233f 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -273,15 +273,12 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} - # Verify retry_prompt was applied retry_request = result.all_messages()[2] assert isinstance(retry_request, ModelRequest) retry_part = retry_request.parts[0] assert isinstance(retry_part, RetryPromptPart) - response = retry_part.model_response() - assert "[{'type': 'int_parsing'" in response + retry_part.model_response() - # Full snapshot verification assert result.all_messages() == snapshot( [ ModelRequest( @@ -355,7 +352,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ) -def test_prompt_templates_string(): +def test_prompt_templates_string_and_override_prompt_templates(): """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" def my_function_tool() -> str: # pragma: no cover @@ -393,13 +390,11 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} - # Verify retry_prompt was applied retry_request = result.all_messages()[2] assert isinstance(retry_request, ModelRequest) retry_part = retry_request.parts[0] assert isinstance(retry_part, RetryPromptPart) - _retry_response = retry_part.model_response() - # Full snapshot verification + retry_part.model_response() assert result.all_messages() == snapshot( [ ModelRequest( @@ -472,7 +467,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ] ) - # Test override - verify prompt_templates can be overridden + # Verify prompt_templates can be overridden with agent.override(prompt_templates=PromptTemplates(retry_prompt='Custom retry message override')): result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} From 9bebf4fed351710c1ee5c0f0e45d76074909f556 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 17:03:20 +0530 Subject: [PATCH 34/98] lint --- tests/test_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index b3e247233f..4002d15568 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -467,7 +467,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ] ) - # Verify prompt_templates can be overridden + # Verify prompt_templates can be overridden with agent.override(prompt_templates=PromptTemplates(retry_prompt='Custom retry message override')): result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} From 59981c106a12f5656aa2498291aadf25c83820ef Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 6 Dec 2025 17:11:48 +0530 Subject: [PATCH 35/98] renaming variable --- pydantic_ai_slim/pydantic_ai/messages.py | 12 ++++++------ tests/test_agent.py | 6 +++--- tests/test_tools.py | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 787d49ee30..318eb44782 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ 
b/pydantic_ai_slim/pydantic_ai/messages.py @@ -956,14 +956,14 @@ class RetryPromptPart: part_kind: Literal['retry-prompt'] = 'retry-prompt' """Part type identifier, this is available on all parts as a discriminator.""" - retry_template: str | None = None - """Message compiled using the provided prompt template. This message will be sent to the model to retry if present.""" + retry_message: str | None = None + """The retry message rendered using the user's prompt template. Used instead of the default retry message when present.""" def model_response(self) -> str: """Return a string message describing why the retry is requested.""" - if self.retry_template: + if self.retry_message: # We added this based on a provided prompt template so let us use this instead of our usual string - return self.retry_template + return self.retry_message if isinstance(self.content, str): if self.tool_name is None: @@ -2001,9 +2001,9 @@ def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _Ru elif isinstance(message, RetryPromptPart) and self.retry_prompt: if isinstance(self.retry_prompt, str): - message.retry_template = self.retry_prompt + message.retry_message = self.retry_prompt else: - message.retry_template = self.retry_prompt(message, ctx) + message.retry_message = self.retry_prompt(message, ctx) def _apply_tool_template( self, diff --git a/tests/test_agent.py b/tests/test_agent.py index 4002d15568..de38e98deb 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -306,7 +306,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ], tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - retry_template="Custom retry message [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", + retry_message="Custom retry message [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", ) ], run_id=IsStr(), @@ -422,7 +422,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ], tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - retry_template='Custom retry message', + retry_message='Custom retry message', ) ], run_id=IsStr(), @@ -4763,7 +4763,7 @@ def foo_tool(foo: Foo) -> int: 'tool_call_id': IsStr(), 'timestamp': IsStr(), 'part_kind': 'retry-prompt', - 'retry_template': None, + 'retry_message': None, } ], 'instructions': None, diff --git a/tests/test_tools.py b/tests/test_tools.py index d740dcd801..ac57ba285d 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -2309,7 +2309,7 @@ def test_deferred_tool_results_serializable(): 'tool_call_id': 'foo', 'timestamp': IsDatetime(), 'part_kind': 'retry-prompt', - 'retry_template': None, + 'retry_message': None, }, 'any': {'foo': 'bar'}, }, From 987293e7ada6b7ef07290e9abbb2ca234976359a Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 9 Dec 2025 12:46:06 +0530 Subject: [PATCH 36/98] removing useless comment --- tests/test_agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index de38e98deb..b235e8e9a9 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -317,7 +317,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), ], - 
usage=RequestUsage(input_tokens=75, output_tokens=23), # More tokens for 3 tool calls + usage=RequestUsage(input_tokens=75, output_tokens=23), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), @@ -433,7 +433,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), ], - usage=RequestUsage(input_tokens=54, output_tokens=23), # More tokens for 3 tool calls + usage=RequestUsage(input_tokens=54, output_tokens=23), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), From def1747c02b4ca169b848ac0edb9fbd022b055f4 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 09:46:34 +0530 Subject: [PATCH 37/98] rolling back prompt_templates from __repr__ --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 4cf5dcfe2d..7161352b95 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -421,7 +421,7 @@ def event_stream_handler(self) -> EventStreamHandler[AgentDepsT] | None: return self._event_stream_handler def __repr__(self) -> str: - return f'{type(self).__name__}(model={self.model!r}, name={self.name!r}, end_strategy={self.end_strategy!r}, model_settings={self.model_settings!r}, prompt_templates={self.prompt_templates!r},output_type={self.output_type!r}, instrument={self.instrument!r})' + return f'{type(self).__name__}(model={self.model!r}, name={self.name!r}, end_strategy={self.end_strategy!r}, model_settings={self.model_settings!r}, output_type={self.output_type!r}, instrument={self.instrument!r})' @overload def iter( From 74c6e236834cf47d3170e0abceca8234591cef39 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 11:02:36 +0530 Subject: [PATCH 38/98] removing in-place mutation of message history (mutating without a copy was ruining history) --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 21 +++++++++---- pydantic_ai_slim/pydantic_ai/messages.py | 32 +++++++++++--------- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 19fd751aca..31c4210654 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -511,8 +511,11 @@ async def _prepare_request( ctx.deps.new_message_index -= len(original_history) - len(message_history) prompt_templates = ctx.deps.prompt_templates + if prompt_templates: - _apply_prompt_templates(message_history, prompt_templates, run_context) + message_history = _apply_prompt_templates(message_history, prompt_templates, run_context) + + ctx.state.message_history[:] = message_history # Merge possible consecutive trailing `ModelRequest`s into one, with tool call parts before user parts, # but don't store it in the message history on state. This is just for the benefit of model classes that want clear user/assistant boundaries.
@@ -790,8 +793,7 @@ def _handle_final_result( if tool_responses and ctx.deps.prompt_templates: run_ctx = build_run_context(ctx) - for part in tool_responses: - ctx.deps.prompt_templates.apply_template(part, run_ctx) + tool_responses = [ctx.deps.prompt_templates.apply_template(part, run_ctx) for part in tool_responses] # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: @@ -1382,7 +1384,14 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess def _apply_prompt_templates( messages: list[_messages.ModelMessage], prompt_templates: _messages.PromptTemplates, ctx: RunContext[Any] -): +) -> list[_messages.ModelMessage]: + messages_template_applied: list[_messages.ModelMessage] = [] + for msg in messages: - for msg_part in msg.parts: - prompt_templates.apply_template(msg_part, ctx) + if isinstance(msg, _messages.ModelRequest): + parts_template_applied = [prompt_templates.apply_template(part, ctx) for part in msg.parts] + messages_template_applied.append(replace(msg, parts=parts_template_applied)) + else: + messages_template_applied.append(msg) + + return messages_template_applied diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 5658892df5..b2c4f3d023 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -2024,20 +2024,20 @@ class PromptTemplates: Default: "Tool not executed - a final result was already processed." """ - def apply_template(self, message: ModelRequestPart | ModelResponsePart, ctx: _RunContext[Any]): - if isinstance(message, ToolReturnPart): - if message.return_kind == 'final-result-processed' and self.final_result_processed: - self._apply_tool_template(message, ctx, self.final_result_processed) - elif message.return_kind == 'output-tool-not-executed' and self.output_tool_not_executed: - self._apply_tool_template(message, ctx, self.output_tool_not_executed) - elif message.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed: - self._apply_tool_template(message, ctx, self.function_tool_not_executed) - - elif isinstance(message, RetryPromptPart) and self.retry_prompt: + def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: + if isinstance(message_part, ToolReturnPart): + if message_part.return_kind == 'final-result-processed' and self.final_result_processed: + return self._apply_tool_template(message_part, ctx, self.final_result_processed) + elif message_part.return_kind == 'output-tool-not-executed' and self.output_tool_not_executed: + return self._apply_tool_template(message_part, ctx, self.output_tool_not_executed) + elif message_part.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed: + return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed) + elif isinstance(message_part, RetryPromptPart) and self.retry_prompt: if isinstance(self.retry_prompt, str): - message.retry_message = self.retry_prompt + return replace(message_part, retry_message=self.retry_prompt) else: - message.retry_message = self.retry_prompt(message, ctx) + return replace(message_part, retry_message=self.retry_prompt(message_part, ctx)) + return message_part # Returns the original message if no template is applied def _apply_tool_template( self, @@ -2045,7 +2045,11 @@ def _apply_tool_template( ctx: _RunContext[Any], template: str | Callable[[ToolReturnPart, _RunContext[Any]], str], ): + 
message_part: ToolReturnPart = message + if isinstance(template, str): - message.content = template + message_part = replace(message_part, content=template) + else: - message.content = template(message, ctx) + message_part = replace(message_part, content=template(message, ctx)) + return message_part From 9aadb713154bd7b115015202c83a7ed43d00256d Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 11:39:56 +0530 Subject: [PATCH 39/98] moving prompt_templates to a diff file --- pydantic_ai_slim/pydantic_ai/__init__.py | 3 + pydantic_ai_slim/pydantic_ai/_agent_graph.py | 6 +- .../pydantic_ai/agent/__init__.py | 21 +++--- .../pydantic_ai/agent/abstract.py | 41 +++++------ pydantic_ai_slim/pydantic_ai/agent/wrapper.py | 9 +-- .../pydantic_ai/durable_exec/dbos/_agent.py | 35 +++++----- .../durable_exec/prefect/_agent.py | 33 ++++----- .../durable_exec/temporal/_agent.py | 33 ++++----- .../pydantic_ai/prompt_templates.py | 70 +++++++++++++++++++ tests/test_agent.py | 2 +- 10 files changed, 166 insertions(+), 87 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/prompt_templates.py diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py index c860d20dd8..95e111f5f1 100644 --- a/pydantic_ai_slim/pydantic_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/__init__.py @@ -94,6 +94,7 @@ ModelProfile, ModelProfileSpec, ) +from .prompt_templates import PromptTemplates from .run import AgentRun, AgentRunResult, AgentRunResultEvent from .settings import ModelSettings from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied @@ -229,6 +230,8 @@ 'PromptedOutput', 'TextOutput', 'StructuredDict', + # prompt_templates + 'PromptTemplates', # format_prompt 'format_as_xml', # settings diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 31c4210654..1bb602b48e 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -25,7 +25,7 @@ from pydantic_graph.beta import Graph, GraphBuilder from pydantic_graph.nodes import End, NodeRunEndT -from . import _output, _system_prompt, exceptions, messages as _messages, models, result, usage as _usage +from . 
import _output, _system_prompt, exceptions, messages as _messages, models, prompt_templates as _prompt_templates, result, usage as _usage from .exceptions import ToolRetryError from .output import OutputDataT, OutputSpec from .settings import ModelSettings @@ -138,7 +138,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): model: models.Model model_settings: ModelSettings | None - prompt_templates: _messages.PromptTemplates | None + prompt_templates: _prompt_templates.PromptTemplates | None usage_limits: _usage.UsageLimits max_result_retries: int end_strategy: EndStrategy @@ -1383,7 +1383,7 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess def _apply_prompt_templates( - messages: list[_messages.ModelMessage], prompt_templates: _messages.PromptTemplates, ctx: RunContext[Any] + messages: list[_messages.ModelMessage], prompt_templates: _prompt_templates.PromptTemplates, ctx: RunContext[Any] ) -> list[_messages.ModelMessage]: messages_template_applied: list[_messages.ModelMessage] = [] diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 7161352b95..108c715fcf 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -24,6 +24,7 @@ exceptions, messages as _messages, models, + prompt_templates as _prompt_templates, usage as _usage, ) from .._agent_graph import ( @@ -125,7 +126,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]): be merged with this value, with the runtime argument taking priority. """ - prompt_templates: _messages.PromptTemplates | None + prompt_templates: _prompt_templates.PromptTemplates | None """Optional prompt templates used to customize the system-injected messages for this agent.""" _output_type: OutputSpec[OutputDataT] @@ -170,7 +171,7 @@ def __init__( deps_type: type[AgentDepsT] = NoneType, name: str | None = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, retries: int = 1, validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, @@ -223,7 +224,7 @@ def __init__( deps_type: type[AgentDepsT] = NoneType, name: str | None = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, retries: int = 1, validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, @@ -365,7 +366,7 @@ def __init__( self._override_instructions: ContextVar[ _utils.Option[list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]] ] = ContextVar('_override_instructions', default=None) - self._override_prompt_templates: ContextVar[_utils.Option[_messages.PromptTemplates]] = ContextVar( + self._override_prompt_templates: ContextVar[_utils.Option[_prompt_templates.PromptTemplates]] = ContextVar( '_override_prompt_templates', default=None ) @@ -435,7 +436,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -455,7 +456,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, 
deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -475,7 +476,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -767,7 +768,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -1351,8 +1352,8 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: return deps def _get_prompt_templates( - self, prompt_templates: _messages.PromptTemplates | None - ) -> _messages.PromptTemplates | None: + self, prompt_templates: _prompt_templates.PromptTemplates | None + ) -> _prompt_templates.PromptTemplates | None: """Get prompt_templates for a run. If we've overridden prompt_templates via `_override_prompt_templates`, use that, diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index d4b0cb186f..0125538187 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -21,6 +21,7 @@ exceptions, messages as _messages, models, + prompt_templates as _prompt_templates, result, usage as _usage, ) @@ -160,7 +161,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -181,7 +182,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -201,7 +202,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -290,7 +291,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: 
_prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -311,7 +312,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -331,7 +332,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -412,7 +413,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -433,7 +434,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -454,7 +455,7 @@ async def run_stream( # noqa: C901 instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -650,7 +651,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -670,7 +671,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -689,7 +690,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -784,7 +785,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = 
None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -804,7 +805,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -823,7 +824,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -919,7 +920,7 @@ async def _run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -976,7 +977,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -996,7 +997,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -1017,7 +1018,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -1119,7 +1120,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. 
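Taken together, the overload changes above thread the new keyword through every run entrypoint on AbstractAgent. A minimal sketch of the intended call site, assuming the public import path pydantic_ai.prompt_templates used by the test changes later in this patch; the model name, prompt, and template text are placeholders:

    from pydantic_ai import Agent
    from pydantic_ai.prompt_templates import PromptTemplates

    agent = Agent('test')  # 'test' selects the built-in TestModel; placeholder only

    # Static string template; fields left unset keep their defaults.
    templates = PromptTemplates(
        function_tool_not_executed='Skipped: a final result was already produced.',
    )

    result = agent.run_sync('What is the capital of France?', prompt_templates=templates)

Per the signatures above, the same keyword is accepted by run, run_stream, run_stream_events, and iter.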
diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index 042a762e07..448438171c 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -8,6 +8,7 @@ _utils, messages as _messages, models, + prompt_templates as _prompt_templates, usage as _usage, ) from .._json_schema import JsonSchema @@ -84,7 +85,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -104,7 +105,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -124,7 +125,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -239,7 +240,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. 
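override gains the same keyword, which is mainly useful for swapping templates in tests without rebuilding the agent. A hedged sketch; the callable form and its (part, ctx) signature are assumed from the PromptTemplates dataclass introduced later in this patch:

    from pydantic_ai import Agent
    from pydantic_ai.prompt_templates import PromptTemplates

    agent = Agent('test')  # placeholder model

    # Callable template: derive the confirmation text from the tool call itself.
    templates = PromptTemplates(
        final_result_processed=lambda part, ctx: f'Result accepted from {part.tool_name}.',
    )

    with agent.override(prompt_templates=templates):
        agent.run_sync('Hello')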
diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index 8e8c3b1bb5..af7d945cd7 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -13,6 +13,7 @@ _utils, messages as _messages, models, + prompt_templates as _prompt_templates, usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent @@ -136,7 +137,7 @@ async def wrapped_run_workflow( deps: AgentDepsT, model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -179,7 +180,7 @@ def wrapped_run_sync_workflow( model_settings: ModelSettings | None = None, instructions: Instructions[AgentDepsT] = None, usage_limits: _usage.UsageLimits | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -272,7 +273,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -293,7 +294,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -313,7 +314,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -395,7 +396,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -416,7 +417,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -436,7 +437,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + 
prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -517,7 +518,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -538,7 +539,7 @@ def run_stream( deps: AgentDepsT = None, instructions: Instructions[AgentDepsT] = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -638,7 +639,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -658,7 +659,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -677,7 +678,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -756,7 +757,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -777,7 +778,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -798,7 +799,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -921,7 +922,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, 
instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 3d3542d025..77c049b609 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -16,6 +16,7 @@ _utils, messages as _messages, models, + prompt_templates as _prompt_templates, usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent @@ -184,7 +185,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -205,7 +206,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -225,7 +226,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -313,7 +314,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -334,7 +335,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -354,7 +355,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -444,7 +445,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: 
_usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -465,7 +466,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -486,7 +487,7 @@ async def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -568,7 +569,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -588,7 +589,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -607,7 +608,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -704,7 +705,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -724,7 +725,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -744,7 +745,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -864,7 +865,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - 
prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions. diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 75e4471d34..e1b0e627e5 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -21,6 +21,7 @@ _utils, messages as _messages, models, + prompt_templates as _prompt_templates, usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent @@ -267,7 +268,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -288,7 +289,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -308,7 +309,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -392,7 +393,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -414,7 +415,7 @@ def run_sync( deps: AgentDepsT = None, model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -433,7 +434,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -515,7 +516,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, 
infer_name: bool = True, @@ -536,7 +537,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -557,7 +558,7 @@ async def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -639,7 +640,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -659,7 +660,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -678,7 +679,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -775,7 +776,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -796,7 +797,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -817,7 +818,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _messages.PromptTemplates | None = None, + prompt_templates: _prompt_templates.PromptTemplates | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -950,7 +951,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _messages.PromptTemplates | _utils.Unset = _utils.UNSET, + 
prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET,
    ) -> Iterator[None]:
        """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions.
diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
new file mode 100644
index 0000000000..69966f2b07
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -0,0 +1,70 @@
+from __future__ import annotations as _annotations
+
+from dataclasses import dataclass, replace
+from typing import Any, Callable, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._run_context import RunContext as _RunContext
+
+from .messages import ModelRequestPart, RetryPromptPart, ToolReturnPart
+
+@dataclass
+class PromptTemplates:
+    """Templates for customizing messages that Pydantic AI sends to models.
+
+    Each template can be a static string or a callable that receives context and returns a string.
+    """
+
+    retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None
+    """Message sent to the model after validation failures or invalid responses.
+
+    Default: "Validation feedback: {errors}\\n\\nFix the errors and try again."
+    """
+
+    final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None
+    """Confirmation message sent when a final result is successfully processed.
+
+    Default: "Final result processed."
+    """
+
+    output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None
+    """Message sent when an output tool call is skipped because a result was already found.
+
+    Default: "Output tool not used - a final result was already processed."
+    """
+
+    function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None
+    """Message sent when a function tool call is skipped because a result was already found.
+
+    Default: "Tool not executed - a final result was already processed."
+    """
+
+    def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart:
+        if isinstance(message_part, ToolReturnPart):
+            if message_part.return_kind == 'final-result-processed' and self.final_result_processed:
+                return self._apply_tool_template(message_part, ctx, self.final_result_processed)
+            elif message_part.return_kind == 'output-tool-not-executed' and self.output_tool_not_executed:
+                return self._apply_tool_template(message_part, ctx, self.output_tool_not_executed)
+            elif message_part.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed:
+                return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed)
+        elif isinstance(message_part, RetryPromptPart) and self.retry_prompt:
+            if isinstance(self.retry_prompt, str):
+                return replace(message_part, retry_message=self.retry_prompt)
+            else:
+                return replace(message_part, retry_message=self.retry_prompt(message_part, ctx))
+        return message_part  # Returns the original message if no template is applied
+
+    def _apply_tool_template(
+        self,
+        message: ToolReturnPart,
+        ctx: _RunContext[Any],
+        template: str | Callable[[ToolReturnPart, _RunContext[Any]], str],
+    ):
+        message_part: ToolReturnPart = message
+
+        if isinstance(template, str):
+            message_part = replace(message_part, content=template)
+
+        else:
+            message_part = replace(message_part, content=template(message, ctx))
+        return message_part
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 27f06d9fcc..3f4bb32848 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -63,7 +63,7 @@
     WebSearchTool,
     WebSearchUserLocation,
 )
-from pydantic_ai.messages import PromptTemplates
+from pydantic_ai.prompt_templates import PromptTemplates
 from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel
 from pydantic_ai.models.test import TestModel
 from pydantic_ai.output import OutputObjectDefinition, StructuredDict, ToolOutput

From 41f4f2b22444bec68bbafc113007ed05eb285e90 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 12:01:46 +0530
Subject: [PATCH 40/98] Using class default values for init of content

---
 pydantic_ai_slim/pydantic_ai/_agent_graph.py     | 12 ++++++------
 pydantic_ai_slim/pydantic_ai/prompt_templates.py | 12 ++++++------
 tests/test_agent.py                              |  2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index 1bb602b48e..3aec0d4ac1 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -138,7 +138,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
 
     model: models.Model
     model_settings: ModelSettings | None
-    prompt_templates: _prompt_templates.PromptTemplates | None
+    prompt_templates: _prompt_templates.PromptTemplates
     usage_limits: _usage.UsageLimits
     max_result_retries: int
     end_strategy: EndStrategy
@@ -881,7 +881,7 @@ async def process_tool_calls(  # noqa: C901
             if final_result.tool_call_id == call.tool_call_id:
                 part = _messages.ToolReturnPart(
                     tool_name=call.tool_name,
-                    content='Final result processed.',
+                    content=_prompt_templates.PromptTemplates.final_result_processed,
                     tool_call_id=call.tool_call_id,
                     return_kind='final-result-processed',
                 )
@@ -889,7 +889,7 @@ async def process_tool_calls(  # noqa: C901
                 yield _messages.FunctionToolCallEvent(call)
                 part = _messages.ToolReturnPart(
                     tool_name=call.tool_name,
-                    content='Output tool not used - a final result was already processed.',
+
content=_prompt_templates.PromptTemplates.output_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='output-tool-not-executed', ) @@ -914,7 +914,7 @@ async def process_tool_calls( # noqa: C901 else: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content='Final result processed.', + content=_prompt_templates.PromptTemplates.final_result_processed, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -928,7 +928,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content='Tool not executed - a final result was already processed.', + content=_prompt_templates.PromptTemplates.function_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) @@ -987,7 +987,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content='Tool not executed - a final result was already processed.', + content=_prompt_templates.PromptTemplates.function_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 69966f2b07..7c7b8d1c32 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -21,19 +21,19 @@ class PromptTemplates: Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." """ - final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = "Final result processed." """Confirmation message sent when a final result is successfully processed. Default: "Final result processed." """ - output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = "Output tool not used - a final result was already processed." """Message sent when an output tool call is skipped because a result was already found. Default: "Output tool not used - a final result was already processed." """ - function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = "Tool not executed - a final result was already processed." """Message sent when a function tool call is skipped because a result was already found. Default: "Tool not executed - a final result was already processed." 
@@ -41,11 +41,11 @@ class PromptTemplates: def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: if isinstance(message_part, ToolReturnPart): - if message_part.return_kind == 'final-result-processed' and self.final_result_processed: + if message_part.return_kind == 'final-result-processed': return self._apply_tool_template(message_part, ctx, self.final_result_processed) - elif message_part.return_kind == 'output-tool-not-executed' and self.output_tool_not_executed: + elif message_part.return_kind == 'output-tool-not-executed': return self._apply_tool_template(message_part, ctx, self.output_tool_not_executed) - elif message_part.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed: + elif message_part.return_kind == 'function-tool-not-executed': return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed) elif isinstance(message_part, RetryPromptPart) and self.retry_prompt: if isinstance(self.retry_prompt, str): diff --git a/tests/test_agent.py b/tests/test_agent.py index 3f4bb32848..c093e66137 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -4726,7 +4726,7 @@ def test_agent_run_result_serialization() -> None: def test_agent_repr() -> None: agent = Agent() assert repr(agent) == snapshot( - "Agent(model=None, name=None, end_strategy='early', model_settings=None, prompt_templates=None,output_type=, instrument=None)" + "Agent(model=None, name=None, end_strategy='early', model_settings=None, output_type=, instrument=None)" ) From 8253d8f92d3450f27c77b04aa72dfd4fbf6bd96b Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 12:07:25 +0530 Subject: [PATCH 41/98] Moving tool call denied --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 13 +++++++++-- .../pydantic_ai/prompt_templates.py | 22 +++++++++++++------ pydantic_ai_slim/pydantic_ai/tools.py | 3 --- tests/test_agent.py | 2 +- 4 files changed, 27 insertions(+), 13 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 3aec0d4ac1..c953031a48 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -25,7 +25,16 @@ from pydantic_graph.beta import Graph, GraphBuilder from pydantic_graph.nodes import End, NodeRunEndT -from . import _output, _system_prompt, exceptions, messages as _messages, models, prompt_templates as _prompt_templates, result, usage as _usage +from . 
import ( + _output, + _system_prompt, + exceptions, + messages as _messages, + models, + prompt_templates as _prompt_templates, + result, + usage as _usage, +) from .exceptions import ToolRetryError from .output import OutputDataT, OutputSpec from .settings import ModelSettings @@ -1144,7 +1153,7 @@ async def _call_tool( elif isinstance(tool_call_result, ToolDenied): return _messages.ToolReturnPart( tool_name=tool_call.tool_name, - content=tool_call_result.message, + content=_prompt_templates.PromptTemplates.tool_call_denied, tool_call_id=tool_call.tool_call_id, return_kind='tool-denied', ), None diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 7c7b8d1c32..cd08972055 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -1,13 +1,15 @@ from __future__ import annotations as _annotations +from collections.abc import Callable from dataclasses import dataclass, replace -from typing import Any, Callable, TYPE_CHECKING +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ._run_context import RunContext as _RunContext from .messages import ModelRequestPart, RetryPromptPart, ToolReturnPart + @dataclass class PromptTemplates: """Templates for customizing messages that Pydantic AI sends to models. @@ -21,24 +23,28 @@ class PromptTemplates: Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." """ - final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = "Final result processed." + final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Final result processed.' """Confirmation message sent when a final result is successfully processed. - Default: "Final result processed." """ - output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = "Output tool not used - a final result was already processed." + output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = ( + 'Output tool not used - a final result was already processed.' + ) """Message sent when an output tool call is skipped because a result was already found. - Default: "Output tool not used - a final result was already processed." """ - function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = "Tool not executed - a final result was already processed." + function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = ( + 'Tool not executed - a final result was already processed.' + ) """Message sent when a function tool call is skipped because a result was already found. - Default: "Tool not executed - a final result was already processed." """ + tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Tool call was denied.' 
+ """Message sent when a tool call is denied.""" + def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: if isinstance(message_part, ToolReturnPart): if message_part.return_kind == 'final-result-processed': @@ -47,6 +53,8 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) return self._apply_tool_template(message_part, ctx, self.output_tool_not_executed) elif message_part.return_kind == 'function-tool-not-executed': return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed) + elif message_part.return_kind == 'tool-denied': + return self._apply_tool_template(message_part, ctx, self.tool_call_denied) elif isinstance(message_part, RetryPromptPart) and self.retry_prompt: if isinstance(self.retry_prompt, str): return replace(message_part, retry_message=self.retry_prompt) diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index dcd860b019..3d7aa55e11 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -176,9 +176,6 @@ class ToolApproved: class ToolDenied: """Indicates that a tool call has been denied and that a denial message should be returned to the model.""" - message: str = 'The tool call was denied.' - """The message to return to the model.""" - _: KW_ONLY kind: Literal['tool-denied'] = 'tool-denied' diff --git a/tests/test_agent.py b/tests/test_agent.py index c093e66137..78c8a9f044 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -63,10 +63,10 @@ WebSearchTool, WebSearchUserLocation, ) -from pydantic_ai.prompt_templates import PromptTemplates from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel from pydantic_ai.models.test import TestModel from pydantic_ai.output import OutputObjectDefinition, StructuredDict, ToolOutput +from pydantic_ai.prompt_templates import PromptTemplates from pydantic_ai.result import RunUsage from pydantic_ai.settings import ModelSettings from pydantic_ai.tools import DeferredToolRequests, DeferredToolResults, ToolDefinition, ToolDenied From 09b25975ab13a4f94b300186822ceff7314ce058 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 12:10:17 +0530 Subject: [PATCH 42/98] removing prompt_templates from messages.py --- pydantic_ai_slim/pydantic_ai/messages.py | 60 ------------------------ 1 file changed, 60 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index b2c4f3d023..4a13fce211 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1993,63 +1993,3 @@ class BuiltinToolResultEvent: """An event in the agent stream: model response stream events and response-handling events.""" -@dataclass -class PromptTemplates: - """Templates for customizing messages that Pydantic AI sends to models. - - Each template can be a static string or a callable that receives context and returns a string. - """ - - retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None - """Message sent to the model after validation failures or invalid responses. - - Default: "Validation feedback: {errors}\\n\\nFix the errors and try again." - """ - - final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None - """Confirmation message sent when a final result is successfully processed. - - Default: "Final result processed." 
- """ - - output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None - """Message sent when an output tool call is skipped because a result was already found. - - Default: "Output tool not used - a final result was already processed." - """ - - function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None - """Message sent when a function tool call is skipped because a result was already found. - - Default: "Tool not executed - a final result was already processed." - """ - - def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: - if isinstance(message_part, ToolReturnPart): - if message_part.return_kind == 'final-result-processed' and self.final_result_processed: - return self._apply_tool_template(message_part, ctx, self.final_result_processed) - elif message_part.return_kind == 'output-tool-not-executed' and self.output_tool_not_executed: - return self._apply_tool_template(message_part, ctx, self.output_tool_not_executed) - elif message_part.return_kind == 'function-tool-not-executed' and self.function_tool_not_executed: - return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed) - elif isinstance(message_part, RetryPromptPart) and self.retry_prompt: - if isinstance(self.retry_prompt, str): - return replace(message_part, retry_message=self.retry_prompt) - else: - return replace(message_part, retry_message=self.retry_prompt(message_part, ctx)) - return message_part # Returns the original message if no template is applied - - def _apply_tool_template( - self, - message: ToolReturnPart, - ctx: _RunContext[Any], - template: str | Callable[[ToolReturnPart, _RunContext[Any]], str], - ): - message_part: ToolReturnPart = message - - if isinstance(template, str): - message_part = replace(message_part, content=template) - - else: - message_part = replace(message_part, content=template(message, ctx)) - return message_part From 0477465fd4941cb9636698b94edd3d7fcaa7c52f Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 12:13:42 +0530 Subject: [PATCH 43/98] lint --- pydantic_ai_slim/pydantic_ai/messages.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 4a13fce211..53d1dcc1a0 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -23,7 +23,6 @@ from .usage import RequestUsage if TYPE_CHECKING: - from ._run_context import RunContext as _RunContext from .models.instrumented import InstrumentationSettings @@ -1991,5 +1990,3 @@ class BuiltinToolResultEvent: AgentStreamEvent = Annotated[ModelResponseStreamEvent | HandleResponseEvent, pydantic.Discriminator('event_kind')] """An event in the agent stream: model response stream events and response-handling events.""" - - From f5fb9944fd155001f79b4c91bb332d4122fc9386 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 12:26:24 +0530 Subject: [PATCH 44/98] keep prompt_templates non-able, read default values off of the class directly --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index c953031a48..b54e03465e 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -147,7 +147,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): model: 
models.Model model_settings: ModelSettings | None - prompt_templates: _prompt_templates.PromptTemplates + prompt_templates: _prompt_templates.PromptTemplates | None usage_limits: _usage.UsageLimits max_result_retries: int end_strategy: EndStrategy From b6415aabfc437015c4bf5174529426f481fe3a71 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 12:43:11 +0530 Subject: [PATCH 45/98] fixing ToolDenied --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 2 +- pydantic_ai_slim/pydantic_ai/tools.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index b54e03465e..a08283a4ad 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -1153,7 +1153,7 @@ async def _call_tool( elif isinstance(tool_call_result, ToolDenied): return _messages.ToolReturnPart( tool_name=tool_call.tool_name, - content=_prompt_templates.PromptTemplates.tool_call_denied, + content=tool_call_result.message, tool_call_id=tool_call.tool_call_id, return_kind='tool-denied', ), None diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index 3d7aa55e11..6a69a08b06 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -14,6 +14,7 @@ from .builtin_tools import AbstractBuiltinTool from .exceptions import ModelRetry from .messages import RetryPromptPart, ToolCallPart, ToolReturn +from .prompt_templates import PromptTemplates __all__ = ( 'AgentDepsT', @@ -176,6 +177,9 @@ class ToolApproved: class ToolDenied: """Indicates that a tool call has been denied and that a denial message should be returned to the model.""" + message: str = cast(str, PromptTemplates.tool_call_denied) + """The message to return to the model.""" + _: KW_ONLY kind: Literal['tool-denied'] = 'tool-denied' From 339ea74488670d4312ee11c1fd2242268b4cb8a0 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 13:09:29 +0530 Subject: [PATCH 46/98] Moving to a default instance instead of reading class variables --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 17 ++++++++--------- .../pydantic_ai/prompt_templates.py | 3 +++ pydantic_ai_slim/pydantic_ai/tools.py | 4 ++-- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index a08283a4ad..4b3fb50b85 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -147,7 +147,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): model: models.Model model_settings: ModelSettings | None - prompt_templates: _prompt_templates.PromptTemplates | None + prompt_templates: _prompt_templates.PromptTemplates = _prompt_templates.DEFAULT_PROMPT_TEMPLATES usage_limits: _usage.UsageLimits max_result_retries: int end_strategy: EndStrategy @@ -521,8 +521,7 @@ async def _prepare_request( prompt_templates = ctx.deps.prompt_templates - if prompt_templates: - message_history = _apply_prompt_templates(message_history, prompt_templates, run_context) + message_history = _apply_prompt_templates(message_history, prompt_templates, run_context) ctx.state.message_history[:] = message_history @@ -800,7 +799,7 @@ def _handle_final_result( ) -> End[result.FinalResult[NodeRunEndT]]: messages = ctx.state.message_history - if tool_responses and ctx.deps.prompt_templates: + if tool_responses: run_ctx = build_run_context(ctx) tool_responses = 
[ctx.deps.prompt_templates.apply_template(part, run_ctx) for part in tool_responses] @@ -890,7 +889,7 @@ async def process_tool_calls( # noqa: C901 if final_result.tool_call_id == call.tool_call_id: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.PromptTemplates.final_result_processed, + content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.final_result_processed, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -898,7 +897,7 @@ async def process_tool_calls( # noqa: C901 yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.PromptTemplates.output_tool_not_executed, + content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.output_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='output-tool-not-executed', ) @@ -923,7 +922,7 @@ async def process_tool_calls( # noqa: C901 else: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.PromptTemplates.final_result_processed, + content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.final_result_processed, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -937,7 +936,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.PromptTemplates.function_tool_not_executed, + content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.function_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) @@ -996,7 +995,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.PromptTemplates.function_tool_not_executed, + content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.function_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index cd08972055..071976c560 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -76,3 +76,6 @@ def _apply_tool_template( else: message_part = replace(message_part, content=template(message, ctx)) return message_part + + +DEFAULT_PROMPT_TEMPLATES = PromptTemplates() diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index 6a69a08b06..738a47679c 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -14,7 +14,7 @@ from .builtin_tools import AbstractBuiltinTool from .exceptions import ModelRetry from .messages import RetryPromptPart, ToolCallPart, ToolReturn -from .prompt_templates import PromptTemplates +from .prompt_templates import DEFAULT_PROMPT_TEMPLATES __all__ = ( 'AgentDepsT', @@ -177,7 +177,7 @@ class ToolApproved: class ToolDenied: """Indicates that a tool call has been denied and that a denial message should be returned to the model.""" - message: str = cast(str, PromptTemplates.tool_call_denied) + message: str = cast(str, DEFAULT_PROMPT_TEMPLATES.tool_call_denied) """The message to return to the model.""" _: KW_ONLY From 8141c3afcef9d6870c8b269c8e99af6ae75ef286 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 13:22:09 +0530 Subject: [PATCH 47/98] fixing tooldenied overwritten by prompt_template --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 4 +++- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 7 
From 8141c3afcef9d6870c8b269c8e99af6ae75ef286 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 13:22:09 +0530
Subject: [PATCH 47/98] fixing tooldenied overwritten by prompt_template

---
 pydantic_ai_slim/pydantic_ai/_agent_graph.py     | 4 +++-
 pydantic_ai_slim/pydantic_ai/agent/__init__.py   | 7 ++++---
 pydantic_ai_slim/pydantic_ai/prompt_templates.py | 6 +++++-
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index 4b3fb50b85..35b7233e01 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -147,7 +147,9 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
 
     model: models.Model
     model_settings: ModelSettings | None
-    prompt_templates: _prompt_templates.PromptTemplates = _prompt_templates.DEFAULT_PROMPT_TEMPLATES
+    prompt_templates: _prompt_templates.PromptTemplates = dataclasses.field(
+        default_factory=lambda: _prompt_templates.DEFAULT_PROMPT_TEMPLATES
+    )
     usage_limits: _usage.UsageLimits
     max_result_retries: int
     end_strategy: EndStrategy
diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
index 108c715fcf..95e2c1b237 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
@@ -1353,16 +1353,17 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T:
 
     def _get_prompt_templates(
         self, prompt_templates: _prompt_templates.PromptTemplates | None
-    ) -> _prompt_templates.PromptTemplates | None:
+    ) -> _prompt_templates.PromptTemplates:
         """Get prompt_templates for a run.
 
         If we've overridden prompt_templates via `_override_prompt_templates`, use that,
-        otherwise use the prompt_templates passed to the call, falling back to the agent default.
+        otherwise use the prompt_templates passed to the call, falling back to the agent default,
+        and finally falling back to the global default.
         """
         if some_prompt_templates := self._override_prompt_templates.get():
             return some_prompt_templates.value
         else:
-            return prompt_templates or self.prompt_templates
+            return prompt_templates or self.prompt_templates or _prompt_templates.DEFAULT_PROMPT_TEMPLATES
 
     def _normalize_instructions(
         self,
diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index 071976c560..779e1670ad 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -54,7 +54,11 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any])
         elif message_part.return_kind == 'function-tool-not-executed':
             return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed)
         elif message_part.return_kind == 'tool-denied':
-            return self._apply_tool_template(message_part, ctx, self.tool_call_denied)
+            # For tool-denied, only apply template if user configured a custom one
+            # The content may already have a custom message from ToolDenied
+            if self.tool_call_denied != DEFAULT_PROMPT_TEMPLATES.tool_call_denied:
+                return self._apply_tool_template(message_part, ctx, self.tool_call_denied)
+            return message_part
         elif isinstance(message_part, RetryPromptPart) and self.retry_prompt:
             if isinstance(self.retry_prompt, str):
                 return replace(message_part, retry_message=self.retry_prompt)

From fee446decf99373d63aeb1c4c9ee56c343e89cbb Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 13:45:00 +0530
Subject: [PATCH 48/98] fixing string in tool denied message

---
 pydantic_ai_slim/pydantic_ai/prompt_templates.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index 779e1670ad..c2b4e918b1 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -42,7 +42,7 @@ class PromptTemplates:
 
     """
 
-    tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Tool call was denied.'
+    tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'The tool call was denied.'
     """Message sent when a tool call is denied."""
 
     def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart:
@@ -54,8 +54,7 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any])
         elif message_part.return_kind == 'function-tool-not-executed':
             return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed)
         elif message_part.return_kind == 'tool-denied':
-            # For tool-denied, only apply template if user configured a custom one
-            # The content may already have a custom message from ToolDenied
+            # The content may already have a custom message from ToolDenied in which case we should not override it
             if self.tool_call_denied != DEFAULT_PROMPT_TEMPLATES.tool_call_denied:
                 return self._apply_tool_template(message_part, ctx, self.tool_call_denied)
             return message_part

From 45dff5102a43e87810f08623df1157f4f6578fed Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 14:02:27 +0530
Subject: [PATCH 49/98] tool return kind in google

---
 tests/models/test_google.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/models/test_google.py b/tests/models/test_google.py
index afdc70dda7..c14870bf27 100644
--- a/tests/models/test_google.py
+++ b/tests/models/test_google.py
@@ -4566,6 +4566,7 @@ def get_country() -> str:
                         content='Mexico',
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
+                        return_kind='tool-executed',
                     )
                 ],
                 run_id=IsStr(),
@@ -4618,6 +4619,7 @@ def get_country() -> str:
                     content='Mexico',
                     tool_call_id=IsStr(),
                     timestamp=IsDatetime(),
+                    return_kind='tool-executed',
                 )
             ),
             PartStartEvent(index=0, part=TextPart(content='The')),
From 0f729f06b4a4e6cae728c48dc5a6a9a6aefe500a Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 15:56:23 +0530
Subject: [PATCH 50/98] Adding handling for retry prompt templates

---
 pydantic_ai_slim/pydantic_ai/messages.py | 12 ++++--
 .../pydantic_ai/prompt_templates.py      | 40 +++++++++++++++----
 2 files changed, 41 insertions(+), 11 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py
index 53d1dcc1a0..b2254beb40 100644
--- a/pydantic_ai_slim/pydantic_ai/messages.py
+++ b/pydantic_ai_slim/pydantic_ai/messages.py
@@ -961,22 +961,26 @@ class RetryPromptPart:
 
     def model_response(self) -> str:
         """Return a string message describing why the retry is requested."""
-        if self.retry_message:
-            # We added this based on a provided prompt template so let us use this instead of our usual string
-            return self.retry_message
         if isinstance(self.content, str):
             if self.tool_name is None:
                 description = f'Validation feedback:\n{self.content}'
+                if self.retry_message:
+                    description += f'\n\n{self.retry_message}'
             else:
                 description = self.content
+                if self.retry_message:
+                    description += f'\n\n{self.retry_message}'
         else:
             json_errors = error_details_ta.dump_json(self.content, exclude={'__all__': {'ctx'}}, indent=2)
             plural = isinstance(self.content, list) and len(self.content) != 1
             description = (
                 f'{len(self.content)} validation error{"s" if plural else ""}:\n```json\n{json_errors.decode()}\n```'
             )
-        return f'{description}\n\nFix the errors and try again.'
+            if self.retry_message:
+                description += f'\n\n{self.retry_message}'
+
+        return description
 
     def otel_event(self, settings: InstrumentationSettings) -> Event:
         if self.tool_name is None:
diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index c2b4e918b1..ee92abc22d 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -17,10 +17,9 @@ class PromptTemplates:
     Each template can be a static string or a callable that receives context and returns a string.
     """
 
-    retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None
-    """Message sent to the model after validation failures or invalid responses.
+    retry_prompt: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.'
+    """Default message sent to the model after validation failures or invalid responses.
 
-    Default: "Validation feedback: {errors}\\n\\nFix the errors and try again."
     """
 
     final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Final result processed.'
@@ -45,6 +44,18 @@ class PromptTemplates:
     tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'The tool call was denied.'
     """Message sent when a tool call is denied."""
 
+    validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix these validation errors and try again.'
+    """Message sent to the model after validation errors."""
+
+    model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'The previous response was invalid. Please try again.'
+    """Message sent to the model when a ModelRetry exception is raised and tool is present."""
+
+    model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'The previous response was invalid. Please try again without using any tools.'
+    """Message sent to the model when a ModelRetry exception is raised and no tool is present."""
+
+
+
     def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart:
         if isinstance(message_part, ToolReturnPart):
             if message_part.return_kind == 'final-result-processed':
@@ -59,11 +70,26 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any])
                 return self._apply_tool_template(message_part, ctx, self.tool_call_denied)
             return message_part
         elif isinstance(message_part, RetryPromptPart) and self.retry_prompt:
-            if isinstance(self.retry_prompt, str):
-                return replace(message_part, retry_message=self.retry_prompt)
-            else:
-                return replace(message_part, retry_message=self.retry_prompt(message_part, ctx))
+            template = self._get_template_for_retry(message_part)
+            return self._apply_retry_template(message_part, ctx, template)
         return message_part  # Returns the original message if no template is applied
+
+    def _get_template_for_retry(self, message: RetryPromptPart) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]:
+        if isinstance(message.content, str):
+            if message.tool_name is None:
+                return self.model_retry_string_no_tool
+            else:
+                return self.model_retry_string_tool
+        else:
+            return self.validation_errors_retry
+
+    def _apply_retry_template(self, message: RetryPromptPart, ctx: _RunContext[Any], template: str | Callable[[RetryPromptPart, _RunContext[Any]], str]):
+        if isinstance(template, str):
+            return replace(message, retry_message=template)
+        else:
+            return replace(message, retry_message=template(message, ctx))
+
 
     def _apply_tool_template(
         self,
"""Confirmation message sent when a final result is successfully processed. @@ -69,7 +64,7 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) if self.tool_call_denied != DEFAULT_PROMPT_TEMPLATES.tool_call_denied: return self._apply_tool_template(message_part, ctx, self.tool_call_denied) return message_part - elif isinstance(message_part, RetryPromptPart) and self.retry_prompt: + elif isinstance(message_part, RetryPromptPart): template = self._get_template_for_retry(message_part) return self._apply_retry_tempelate(message_part, ctx, template) return message_part # Returns the original message if no template is applied From 946a20b32530afba045a4c51a4d1587c682de6e7 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 16:11:35 +0530 Subject: [PATCH 52/98] fixing test snapshots --- tests/test_agent.py | 68 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 58 insertions(+), 10 deletions(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index 78c8a9f044..0e3e3778a7 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -236,7 +236,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse def test_prompt_templates_callable(): - """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" + """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" def my_function_tool() -> str: # pragma: no cover return 'function executed' @@ -261,7 +261,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse FunctionModel(return_model), output_type=Foo, prompt_templates=PromptTemplates( - retry_prompt=lambda part, ctx: f'Custom retry message {part.content}', + validation_errors_retry=lambda part, ctx: f'Custom retry message {part.content}', final_result_processed=lambda part, ctx: f'Custom final result {part.content}', output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', @@ -277,7 +277,23 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert isinstance(retry_request, ModelRequest) retry_part = retry_request.parts[0] assert isinstance(retry_part, RetryPromptPart) - retry_part.model_response() + # model_response() returns validation errors + retry_message appended + assert retry_part.model_response() == snapshot("""\ +1 validation error: +```json +[ + { + "type": "int_parsing", + "loc": [ + "a" + ], + "msg": "Input should be a valid integer, unable to parse string as an integer", + "input": "wrong" + } +] +``` + +Custom retry message [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]""") assert result.all_messages() == snapshot( [ @@ -317,7 +333,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), ], - usage=RequestUsage(input_tokens=75, output_tokens=23), + usage=RequestUsage(input_tokens=106, output_tokens=23), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), @@ -353,7 +369,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> 
ModelResponse def test_prompt_templates_string_and_override_prompt_templates(): - """Test all prompt templates: retry_prompt, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" + """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" def my_function_tool() -> str: # pragma: no cover return 'function executed' @@ -378,7 +394,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse FunctionModel(return_model), output_type=Foo, prompt_templates=PromptTemplates( - retry_prompt='Custom retry message', + validation_errors_retry='Custom retry message', final_result_processed='Custom final result', output_tool_not_executed='Custom output not executed:', function_tool_not_executed='Custom function not executed', @@ -394,7 +410,23 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert isinstance(retry_request, ModelRequest) retry_part = retry_request.parts[0] assert isinstance(retry_part, RetryPromptPart) - retry_part.model_response() + # model_response() returns validation errors + retry_message appended + assert retry_part.model_response() == snapshot("""\ +1 validation error: +```json +[ + { + "type": "int_parsing", + "loc": [ + "a" + ], + "msg": "Input should be a valid integer, unable to parse string as an integer", + "input": "wrong" + } +] +``` + +Custom retry message""") assert result.all_messages() == snapshot( [ ModelRequest( @@ -433,7 +465,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), ], - usage=RequestUsage(input_tokens=54, output_tokens=23), + usage=RequestUsage(input_tokens=85, output_tokens=23), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), @@ -468,14 +500,30 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ) # Verify prompt_templates can be overridden - with agent.override(prompt_templates=PromptTemplates(retry_prompt='Custom retry message override')): + with agent.override(prompt_templates=PromptTemplates(validation_errors_retry='Custom retry message override')): result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} retry_request = result.all_messages()[2] assert isinstance(retry_request, ModelRequest) retry_part = retry_request.parts[0] assert isinstance(retry_part, RetryPromptPart) - assert retry_part.model_response() == 'Custom retry message override' + # model_response() returns validation errors + retry_message appended + assert retry_part.model_response() == snapshot("""\ +1 validation error: +```json +[ + { + "type": "int_parsing", + "loc": [ + "a" + ], + "msg": "Input should be a valid integer, unable to parse string as an integer", + "input": "wrong" + } +] +``` + +Custom retry message override""") def test_result_pydantic_model_validation_error(): From 454bda1c92105054badb5af5b1dfe198a5160a21 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 16:14:27 +0530 Subject: [PATCH 53/98] better test string --- tests/test_agent.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index 0e3e3778a7..ec6c6863c5 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -261,7 +261,7 @@ def 
return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse FunctionModel(return_model), output_type=Foo, prompt_templates=PromptTemplates( - validation_errors_retry=lambda part, ctx: f'Custom retry message {part.content}', + validation_errors_retry=lambda part, ctx: f'Please fix these validation errors and try again.', final_result_processed=lambda part, ctx: f'Custom final result {part.content}', output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', @@ -293,7 +293,8 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ] ``` -Custom retry message [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]""") +Please fix these validation errors and try again.\ +""") assert result.all_messages() == snapshot( [ @@ -322,7 +323,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ], tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - retry_message="Custom retry message [{'type': 'int_parsing', 'loc': ('a',), 'msg': 'Input should be a valid integer, unable to parse string as an integer', 'input': 'wrong'}]", + retry_message='Please fix these validation errors and try again.', ) ], run_id=IsStr(), @@ -333,7 +334,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), ], - usage=RequestUsage(input_tokens=106, output_tokens=23), + usage=RequestUsage(input_tokens=91, output_tokens=23), model_name='function:return_model:', timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), From da87aa5b4faa3ab8879bf92fb43c4bca9bd48231 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 16:25:44 +0530 Subject: [PATCH 54/98] lint fix --- pydantic_ai_slim/pydantic_ai/messages.py | 1 - .../pydantic_ai/prompt_templates.py | 54 ++++++++++--------- tests/test_agent.py | 2 +- 3 files changed, 31 insertions(+), 26 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 8f2601fef6..78d3bccade 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -961,7 +961,6 @@ class RetryPromptPart: def model_response(self) -> str: """Return a string message describing why the retry is requested.""" - if isinstance(self.content, str): if self.tool_name is None: description = f'Validation feedback:\n{self.content}' diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 574aa930e7..7fba39af22 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -14,42 +14,43 @@ class PromptTemplates: """Templates for customizing messages that Pydantic AI sends to models. - Each template can be a static string or a callable that receives context and returns a string. + Each template can be a static string or a callable that receives the message part and + [`RunContext`][pydantic_ai.RunContext] and returns a string. """ final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Final result processed.' - """Confirmation message sent when a final result is successfully processed. 
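Since every template also accepts a callable taking the failing part and the run context, the retry text can be computed per run. A small sketch under the same API, with illustrative names:

    from typing import Any

    from pydantic_ai import RunContext
    from pydantic_ai.messages import RetryPromptPart
    from pydantic_ai.prompt_templates import PromptTemplates

    def retry_hint(part: RetryPromptPart, ctx: RunContext[Any]) -> str:
        # part.content carries the validation errors; ctx exposes deps, usage, etc.
        return f'{len(part.content)} issue(s) found - return arguments matching the schema.'

    templates = PromptTemplates(validation_errors_retry=retry_hint)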
- - """ + """Confirmation message sent when a final result is successfully processed.""" output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = ( 'Output tool not used - a final result was already processed.' ) - """Message sent when an output tool call is skipped because a result was already found. - - """ + """Message sent when an output tool call is skipped because a result was already found.""" function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = ( 'Tool not executed - a final result was already processed.' ) - """Message sent when a function tool call is skipped because a result was already found. - - """ + """Message sent when a function tool call is skipped because a result was already found.""" tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'The tool call was denied.' - """Message sent when a tool call is denied.""" + """Message sent when a tool call is denied by an approval handler. - validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix these validation errors and try again.' - """Message sent to the model after validation errors.""" + Note: Custom messages set via `ToolDenied` are preserved unless this template is explicitly overridden. + """ - model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'The previous response was invalid. Please try again.' - """Message sent to the model when a ModelRetry exception is raised and tool is present.""" + validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( + 'Fix these validation errors and try again.' + ) + """Message appended to validation errors when asking the model to retry.""" - model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'The previous response was invalid. Please try again without using any tools.' - """Message sent to the model when a ModelRetry exception is raised and no tool is present.""" + model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( + 'The previous response was invalid. Please try again.' + ) + """Message sent when a `ModelRetry` exception is raised from a tool.""" - - + model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( + 'The previous response was invalid. Please try again without using any tools.' 
+ ) + """Message sent when a `ModelRetry` exception is raised outside of a tool context.""" def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: if isinstance(message_part, ToolReturnPart): @@ -68,8 +69,10 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) template = self._get_template_for_retry(message_part) return self._apply_retry_tempelate(message_part, ctx, template) return message_part # Returns the original message if no template is applied - - def _get_template_for_retry(self, message: RetryPromptPart) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]: + + def _get_template_for_retry( + self, message: RetryPromptPart + ) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]: if isinstance(message.content, str): if message.tool_name is None: return self.model_retry_string_no_tool @@ -78,14 +81,17 @@ def _get_template_for_retry(self, message: RetryPromptPart) -> str | Callable[[R else: return self.validation_errors_retry - def _apply_retry_tempelate(self, message: RetryPromptPart, ctx: _RunContext[Any], template: str | Callable[[RetryPromptPart, _RunContext[Any]], str]): + def _apply_retry_tempelate( + self, + message: RetryPromptPart, + ctx: _RunContext[Any], + template: str | Callable[[RetryPromptPart, _RunContext[Any]], str], + ): if isinstance(template, str): return replace(message, retry_message=template) else: return replace(message, retry_message=template(message, ctx)) - - def _apply_tool_template( self, message: ToolReturnPart, diff --git a/tests/test_agent.py b/tests/test_agent.py index ec6c6863c5..5a56fdec4e 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -261,7 +261,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse FunctionModel(return_model), output_type=Foo, prompt_templates=PromptTemplates( - validation_errors_retry=lambda part, ctx: f'Please fix these validation errors and try again.', + validation_errors_retry=lambda part, ctx: 'Please fix these validation errors and try again.', final_result_processed=lambda part, ctx: f'Custom final result {part.content}', output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', From 65e1321cba975f0123b8904d7a9b14659159db09 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 18:00:32 +0530 Subject: [PATCH 55/98] fix test --- .../pydantic_ai/prompt_templates.py | 6 ++--- tests/test_agent.py | 22 +++++++++++++++---- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 7fba39af22..ee7ff4e471 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -38,17 +38,17 @@ class PromptTemplates: """ validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( - 'Fix these validation errors and try again.' + 'Fix these errors and try again' ) """Message appended to validation errors when asking the model to retry.""" model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( - 'The previous response was invalid. Please try again.' + 'Fix these errors and try again.' 
From 65e1321cba975f0123b8904d7a9b14659159db09 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 18:00:32 +0530
Subject: [PATCH 55/98] fix test

---
 .../pydantic_ai/prompt_templates.py |  6 ++---
 tests/test_agent.py                 | 22 +++++++++++++++----
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index 7fba39af22..ee7ff4e471 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -38,17 +38,17 @@ class PromptTemplates:
 
     validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = (
-        'Fix these validation errors and try again.'
+        'Fix these errors and try again'
     )
     """Message appended to validation errors when asking the model to retry."""
 
     model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = (
-        'The previous response was invalid. Please try again.'
+        'Fix these errors and try again.'
     )
     """Message sent when a `ModelRetry` exception is raised from a tool."""
 
     model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = (
-        'The previous response was invalid. Please try again without using any tools.'
+        'Fix these errors and try again.'
     )
     """Message sent when a `ModelRetry` exception is raised outside of a tool context."""
 
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 5a56fdec4e..871fbc7b8c 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -207,13 +207,14 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
                         ],
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
+                        retry_message='Fix these errors and try again',
                     )
                 ],
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())],
-                usage=RequestUsage(input_tokens=89, output_tokens=14),
+                usage=RequestUsage(input_tokens=88, output_tokens=14),
                 model_name='function:return_model:',
                 timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
@@ -581,7 +582,8 @@ def check_b(cls, v: str) -> str:
 ]
 ```
 
-Fix the errors and try again.""")
+Fix these errors and try again\
+""")
 
 
 def test_output_validator():
@@ -626,6 +628,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
                         tool_name='final_result',
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -771,6 +773,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         content='Please include your response in a tool call.',
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -1372,6 +1375,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_name='final_result',
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -1452,6 +1457,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         content='City not found, I only know Mexico City',
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -2259,13 +2265,14 @@ class CityLocation(BaseModel):
                         ],
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
+                        retry_message='Fix these errors and try again',
                     )
                 ],
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')],
-                usage=RequestUsage(input_tokens=87, output_tokens=12),
+                usage=RequestUsage(input_tokens=86, output_tokens=12),
                 model_name='function:return_city_location:',
                 timestamp=IsDatetime(),
                 run_id=IsStr(),
@@ -2333,6 +2340,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         content='City not found, I only know Mexico City',
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -3027,6 +3038,7 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         content="Unknown tool name: 'foobar'. No tools available.",
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -3073,6 +3085,7 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         content="Unknown tool name: 'foobar'. No tools available.",
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -3493,6 +3506,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         tool_name='unknown_tool',
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
+                        retry_message='Fix these errors and try again.',
                     ),
                     ToolReturnPart(
                         tool_name='deferred_tool',
@@ -3605,6 +3619,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         tool_name='unknown_tool',
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
+                        retry_message='Fix these errors and try again.',
                     ),
                     ToolReturnPart(
                         tool_name='deferred_tool',
@@ -3664,6 +3679,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_name='final_result',
                         tool_call_id='first',
                         timestamp=IsDatetime(),
+                        retry_message='Fix these errors and try again',
                     ),
                     ToolReturnPart(
                         tool_name='final_result',
@@ -4812,7 +4828,7 @@ def foo_tool(foo: Foo) -> int:
                             'tool_call_id': IsStr(),
                             'timestamp': IsStr(),
                             'part_kind': 'retry-prompt',
-                            'retry_message': None,
+                            'retry_message': 'Fix these errors and try again',
                         }
                     ],
                     'instructions': None,
@@ -5702,6 +5721,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
                         content='Please return text or call a tool.',
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
+                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),

From 1ef3ddc4c82c6abb4d53dbbbaf343dd7509de248 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 18:03:50 +0530
Subject: [PATCH 56/98] lint fix

---
 pydantic_ai_slim/pydantic_ai/prompt_templates.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index ee7ff4e471..5c2e6f9e72 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -37,9 +37,7 @@ class PromptTemplates:
     Note: Custom messages set via `ToolDenied` are preserved unless this template is explicitly overridden.
     """
 
-    validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = (
-        'Fix these errors and try again'
-    )
+    validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix these errors and try again'
     """Message appended to validation errors when asking the model to retry."""
 
     model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = (
""" - validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix these errors and try again' + default_model_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again' + """Default message sent when a `ModelRetry` exception is raised.""" + + validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again' """Message appended to validation errors when asking the model to retry.""" model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( - 'Fix these errors and try again.' + 'Fix the errors and try again.' ) """Message sent when a `ModelRetry` exception is raised from a tool.""" model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( - 'Fix these errors and try again.' + 'Fix the errors and try again.' ) """Message sent when a `ModelRetry` exception is raised outside of a tool context.""" + prompted_output_template: str = dedent( + """ + Always respond with a JSON object that's compatible with this schema: + + {schema} + + Don't include any text or Markdown fencing before or after. + """ + ) + + def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: if isinstance(message_part, ToolReturnPart): if message_part.return_kind == 'final-result-processed': diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index ab0ea458d0..f213e6749c 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -1156,6 +1156,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index af3dade249..e729ed57ee 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -477,6 +477,7 @@ async def get_capital(country: str) -> str: tool_name='get_capital', tool_call_id='tooluse_F8LnaCMtQ0-chKTnPhNH2g', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index fd589512fa..46f6d71aaa 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -331,6 +331,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index e58ef36f51..dc44538fdd 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -710,6 +710,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_google.py b/tests/models/test_google.py index c14870bf27..4b20ba8fab 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -607,6 +607,7 @@ async def get_capital(country: str) -> str: tool_name='get_capital', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -3515,6 +3516,7 @@ class Animal(BaseModel): content='Please return text or call a tool.', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', 
) ], run_id=IsStr(), diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index 71074fe4bf..fc2ffb5ba9 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -360,6 +360,7 @@ async def get_location(loc_name: str) -> str: content='Wrong location, please try again', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -5372,6 +5373,7 @@ async def get_something_by_name(name: str) -> str: tool_name='get_something_by_name', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again', ) ], instructions='Be concise. Never use pretty double quotes, just regular ones.', @@ -5504,6 +5506,7 @@ async def get_something_by_name(name: str) -> str: tool_name='get_something_by_name', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again', ) ], instructions='Be concise. Never use pretty double quotes, just regular ones.', diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index d111b086fd..72483208ed 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -389,6 +389,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -831,6 +832,7 @@ def response_validator(value: str) -> str: content='Response is invalid', tool_name=None, tool_call_id=IsStr(), + retry_message='Fix the errors and try again.', timestamp=IsNow(tz=timezone.utc), ) ], diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index f7db9d5d3e..43b106ac56 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -1162,6 +1162,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -1323,6 +1324,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -1726,6 +1728,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_model_test.py b/tests/models/test_model_test.py index c6f0a30c76..07e8b88cf2 100644 --- a/tests/models/test_model_test.py +++ b/tests/models/test_model_test.py @@ -237,6 +237,7 @@ async def my_ret(x: int) -> str: tool_name='my_ret', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 31a9dd9aff..b90d9ed774 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -368,6 +368,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index 7324c4ac0b..31d5d00d84 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -320,6 +320,7 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', 
tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_location', @@ -5961,6 +5962,7 @@ async def test_openai_responses_image_generation_tool_without_image_output( content='Please return text or call a tool.', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -6124,6 +6126,7 @@ class Animal(BaseModel): content='Please return text or include your response in a tool call.', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/test_mcp.py b/tests/test_mcp.py index f737b20d92..977d61fa6e 100644 --- a/tests/test_mcp.py +++ b/tests/test_mcp.py @@ -1227,6 +1227,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent): tool_name='get_error', tool_call_id='call_rETXZWddAGZSHyVHAxptPGgc', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/test_temporal.py b/tests/test_temporal.py index 8597cb75ba..a7641aeba2 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -1995,6 +1995,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien tool_name='get_weather_in_city', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/test_tools.py b/tests/test_tools.py index ac57ba285d..8e30158912 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1788,6 +1788,7 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_banana', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_price', @@ -1802,6 +1803,7 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_grape', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), UserPromptPart( content='The price of apple is 10.0.', @@ -1883,6 +1885,7 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_banana', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_price', @@ -1897,6 +1900,7 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_grape', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), UserPromptPart( content='The price of apple is 10.0.', @@ -1916,6 +1920,7 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_apple', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='buy', @@ -1930,6 +1935,7 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_pear', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), UserPromptPart( content='I bought a banana', @@ -1957,6 +1963,7 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_apple', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='buy', @@ -1971,6 +1978,7 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_pear', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), UserPromptPart( content='I bought a banana', @@ -2030,6 +2038,7 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_banana', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_price', @@ -2044,12 +2053,14 @@ def buy(fruit: str): tool_name='get_price', 
tool_call_id='get_price_grape', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), RetryPromptPart( content='Apples are not available', tool_name='buy', tool_call_id='buy_apple', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='buy', @@ -2064,6 +2075,7 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_pear', timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ), UserPromptPart( content='The price of apple is 10.0.', @@ -2309,7 +2321,7 @@ def test_deferred_tool_results_serializable(): 'tool_call_id': 'foo', 'timestamp': IsDatetime(), 'part_kind': 'retry-prompt', - 'retry_message': None, + 'retry_message': 'Fix the errors and try again.', }, 'any': {'foo': 'bar'}, }, @@ -2418,6 +2430,7 @@ def always_fail(ctx: RunContext[None]) -> str: tool_name='always_fail', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -2436,6 +2449,7 @@ def always_fail(ctx: RunContext[None]) -> str: tool_name='always_fail', tool_call_id=IsStr(), timestamp=IsDatetime(), + retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), From 67234574505f35b95d7d7d3210cd4ed911a3dbd6 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 19:29:29 +0530 Subject: [PATCH 58/98] fixing test for retry prompt part, adding default value --- pydantic_ai_slim/pydantic_ai/messages.py | 6 +++++- pydantic_ai_slim/pydantic_ai/prompt_templates.py | 4 ++-- tests/models/test_groq.py | 2 -- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 4af6be4d73..f4e0201627 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -916,6 +916,10 @@ class BuiltinToolReturnPart(BaseToolReturnPart): error_details_ta = pydantic.TypeAdapter(list[pydantic_core.ErrorDetails], config=pydantic.ConfigDict(defer_build=True)) +def _get_default_model_retry_message() -> str: + from .prompt_templates import DEFAULT_PROMPT_TEMPLATES + return cast(str, DEFAULT_PROMPT_TEMPLATES.default_model_retry) + @dataclass(repr=False) class RetryPromptPart: """A message back to a model asking it to try again. @@ -956,7 +960,7 @@ class RetryPromptPart: part_kind: Literal['retry-prompt'] = 'retry-prompt' """Part type identifier, this is available on all parts as a discriminator.""" - retry_message: str | None = 'Fix the errors and try again.' + retry_message: str | None = field(default_factory=_get_default_model_retry_message) """The retry message rendered using the user's prompt template. It is populated after checking the conditions for the retry so that the correct template is used.""" def model_response(self) -> str: diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 951b61f61e..9da2780299 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -38,10 +38,10 @@ class PromptTemplates: Note: Custom messages set via `ToolDenied` are preserved unless this template is explicitly overridden. """ - default_model_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again' + default_model_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' 
"""Default message sent when a `ModelRetry` exception is raised.""" - validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again' + validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' """Message appended to validation errors when asking the model to retry.""" model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index fc2ffb5ba9..6bc77303ac 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -5373,7 +5373,6 @@ async def get_something_by_name(name: str) -> str: tool_name='get_something_by_name', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again', ) ], instructions='Be concise. Never use pretty double quotes, just regular ones.', @@ -5506,7 +5505,6 @@ async def get_something_by_name(name: str) -> str: tool_name='get_something_by_name', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again', ) ], instructions='Be concise. Never use pretty double quotes, just regular ones.', From e28a4ff1f24dae5777a405ecf9cb027b993edb1e Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 19:30:42 +0530 Subject: [PATCH 59/98] fixing test for retry prompt part, adding default value --- pydantic_ai_slim/pydantic_ai/messages.py | 2 ++ pydantic_ai_slim/pydantic_ai/prompt_templates.py | 7 ++----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index f4e0201627..2763fc5c4a 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -918,8 +918,10 @@ class BuiltinToolReturnPart(BaseToolReturnPart): def _get_default_model_retry_message() -> str: from .prompt_templates import DEFAULT_PROMPT_TEMPLATES + return cast(str, DEFAULT_PROMPT_TEMPLATES.default_model_retry) + @dataclass(repr=False) class RetryPromptPart: """A message back to a model asking it to try again. diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 9da2780299..1fb900ec75 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -2,8 +2,8 @@ from collections.abc import Callable from dataclasses import dataclass, replace -from typing import TYPE_CHECKING, Any from textwrap import dedent +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from ._run_context import RunContext as _RunContext @@ -44,9 +44,7 @@ class PromptTemplates: validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' """Message appended to validation errors when asking the model to retry.""" - model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( - 'Fix the errors and try again.' - ) + model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' 
"""Message sent when a `ModelRetry` exception is raised from a tool.""" model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( @@ -64,7 +62,6 @@ class PromptTemplates: """ ) - def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: if isinstance(message_part, ToolReturnPart): if message_part.return_kind == 'final-result-processed': From a31598b9cda7819a5ff7cace51b0f65494c97976 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 19:59:34 +0530 Subject: [PATCH 60/98] adding PromptOutput --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 5 ++--- pydantic_ai_slim/pydantic_ai/prompt_templates.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 35b7233e01..549c49e5e0 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -396,9 +396,8 @@ async def _prepare_request_parameters( """Build tools and create an agent model.""" output_schema = ctx.deps.output_schema - prompted_output_template = ( - output_schema.template if isinstance(output_schema, _output.PromptedOutputSchema) else None - ) + prompt_templates = ctx.deps.prompt_templates + prompted_output_template = prompt_templates.get_prompted_output_template(output_schema) function_tools: list[ToolDefinition] = [] output_tools: list[ToolDefinition] = [] diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 1fb900ec75..0d64886330 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -6,8 +6,10 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: + from ._output import OutputSchema from ._run_context import RunContext as _RunContext + from .messages import ModelRequestPart, RetryPromptPart, ToolReturnPart @@ -117,5 +119,15 @@ def _apply_tool_template( message_part = replace(message_part, content=template(message, ctx)) return message_part + def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str | None: + """Get the prompted output template for the given output schema.""" + from ._output import PromptedOutputSchema + + if not isinstance(output_schema, PromptedOutputSchema): + print('Output schema is not a PromptedOutputSchema') + return None + + return self.prompted_output_template + DEFAULT_PROMPT_TEMPLATES = PromptTemplates() From 04b6f1416cfde7c8640f3e179833789cf1c9f78d Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 20:21:55 +0530 Subject: [PATCH 61/98] fixing docs --- docs/agents.md | 1 + .../pydantic_ai/prompt_templates.py | 1 - tests/test_agent.py | 33 ++++--------------- 3 files changed, 8 insertions(+), 27 deletions(-) diff --git a/docs/agents.md b/docs/agents.md index 4dd9f9ea0e..cfe2de4c32 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -1071,6 +1071,7 @@ with capture_run_messages() as messages: # (2)! 
From 04b6f1416cfde7c8640f3e179833789cf1c9f78d Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 20:21:55 +0530
Subject: [PATCH 61/98] fixing docs

---
 docs/agents.md                                   |  1 +
 .../pydantic_ai/prompt_templates.py              |  1 -
 tests/test_agent.py                              | 33 ++++++----------
 3 files changed, 8 insertions(+), 27 deletions(-)

diff --git a/docs/agents.md b/docs/agents.md
index 4dd9f9ea0e..cfe2de4c32 100644
--- a/docs/agents.md
+++ b/docs/agents.md
@@ -1071,6 +1071,7 @@ with capture_run_messages() as messages:  # (2)!
                     tool_name='calc_volume',
                     tool_call_id='pyd_ai_tool_call_id',
                     timestamp=datetime.datetime(...),
+                    retry_message='Fix the errors and try again.',
                 )
             ],
             run_id='...',
diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index 0d64886330..212c72ee30 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -124,7 +124,6 @@ def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str
         from ._output import PromptedOutputSchema
 
         if not isinstance(output_schema, PromptedOutputSchema):
-            print('Output schema is not a PromptedOutputSchema')
             return None
 
         return self.prompted_output_template
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 871fbc7b8c..33d640a237 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -207,14 +207,13 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
                         ],
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
-                        retry_message='Fix these errors and try again',
                     )
                 ],
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr())],
-                usage=RequestUsage(input_tokens=88, output_tokens=14),
+                usage=RequestUsage(input_tokens=89, output_tokens=14),
                 model_name='function:return_model:',
                 timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
@@ -582,7 +581,7 @@ def check_b(cls, v: str) -> str:
 ]
 ```
 
-Fix these errors and try again\
+Fix the errors and try again.\
 """)
 
 
 def test_output_validator():
@@ -628,7 +627,6 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
                         tool_name='final_result',
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
-                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -773,7 +771,6 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         content='Please include your response in a tool call.',
                         timestamp=IsNow(tz=timezone.utc),
                         tool_call_id=IsStr(),
-                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -1375,7 +1372,6 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_name='final_result',
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
-                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -1454,10 +1450,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
             ModelRequest(
                 parts=[
                     RetryPromptPart(
-                        content='City not found, I only know Mexico City',
-                        tool_call_id=IsStr(),
-                        timestamp=IsDatetime(),
-                        retry_message='Fix these errors and try again.',
+                        content='City not found, I only know Mexico City', tool_call_id=IsStr(), timestamp=IsDatetime()
                     )
                 ],
                 run_id=IsStr(),
@@ -2265,14 +2258,13 @@ class CityLocation(BaseModel):
                         ],
                         tool_call_id=IsStr(),
                         timestamp=IsDatetime(),
-                        retry_message='Fix these errors and try again',
                     )
                 ],
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='{"city": "Mexico City", "country": "Mexico"}')],
-                usage=RequestUsage(input_tokens=86, output_tokens=12),
+                usage=RequestUsage(input_tokens=87, output_tokens=12),
                 model_name='function:return_city_location:',
                 timestamp=IsDatetime(),
                 run_id=IsStr(),
@@ -2340,10 +2329,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
             ModelRequest(
                 parts=[
                     RetryPromptPart(
-                        content='City not found, I only know Mexico City',
-                        tool_call_id=IsStr(),
-                        timestamp=IsDatetime(),
-                        retry_message='Fix these errors and try again.',
+                        content='City not found, I only know Mexico City', tool_call_id=IsStr(), timestamp=IsDatetime()
                     )
                 ],
                 run_id=IsStr(),
@@ -3038,7 +3027,6 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         content="Unknown tool name: 'foobar'. No tools available.",
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
-                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -3085,7 +3073,6 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         content="Unknown tool name: 'foobar'. No tools available.",
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
-                        retry_message='Fix these errors and try again.',
                     )
                 ],
                 run_id=IsStr(),
@@ -3506,7 +3493,6 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         tool_name='unknown_tool',
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
-                        retry_message='Fix these errors and try again.',
                     ),
                     ToolReturnPart(
                         tool_name='deferred_tool',
@@ -3619,7 +3605,6 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         tool_name='unknown_tool',
                         tool_call_id=IsStr(),
                         timestamp=IsNow(tz=timezone.utc),
-                        retry_message='Fix these errors and try again.',
                     ),
                     ToolReturnPart(
                         tool_name='deferred_tool',
@@ -3679,7 +3664,6 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_name='final_result',
                         tool_call_id='first',
                         timestamp=IsDatetime(),
-                        retry_message='Fix these errors and try again',
                     ),
                     ToolReturnPart(
                         tool_name='final_result',
@@ -4828,7 +4812,7 @@ def foo_tool(foo: Foo) -> int:
                             'tool_call_id': IsStr(),
                             'timestamp': IsStr(),
                             'part_kind': 'retry-prompt',
-                            'retry_message': 'Fix these errors and try again',
+                            'retry_message': 'Fix the errors and try again.',
                         }
                     ],
                     'instructions': None,
@@ -5721,10 +5702,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
             ModelRequest(
                 parts=[
                     RetryPromptPart(
-                        content='Please return text or call a tool.',
-                        tool_call_id=IsStr(),
-                        timestamp=IsDatetime(),
-                        retry_message='Fix these errors and try again.',
+                        content='Please return text or call a tool.', tool_call_id=IsStr(), timestamp=IsDatetime()
                     )
                 ],
                 run_id=IsStr(),
setting tool_call_denied would leave line 78 in prompt_templates.py uncovered; set a custom message here to cover it
+    agent = Agent(
+        model,
+        output_type=[str, DeferredToolRequests],
+        prompt_templates=PromptTemplates(tool_call_denied='Tool call denied custom message.'),
+    )
 
     @agent.tool_plain(requires_approval=True)
     def delete_file(path: str) -> str:
@@ -5865,7 +5870,7 @@ def create_file(path: str, content: str) -> str:
                     ),
                     ToolReturnPart(
                         tool_name='delete_file',
-                        content='File cannot be deleted',
+                        content='Tool call denied custom message.',
                         tool_call_id='never_delete',
                         timestamp=IsDatetime(),
                         return_kind='tool-denied',
@@ -5875,7 +5880,7 @@ def create_file(path: str, content: str) -> str:
                 ),
                 ModelResponse(
                     parts=[TextPart(content='Done!')],
-                    usage=RequestUsage(input_tokens=78, output_tokens=24),
+                    usage=RequestUsage(input_tokens=80, output_tokens=24),
                     model_name='function:model_function:',
                     timestamp=IsDatetime(),
                     run_id=IsStr(),
@@ -5897,7 +5902,7 @@ def create_file(path: str, content: str) -> str:
                 ),
                 ToolReturnPart(
                     tool_name='delete_file',
-                    content='File cannot be deleted',
+                    content='Tool call denied custom message.',
                     tool_call_id='never_delete',
                     timestamp=IsDatetime(),
                     return_kind='tool-denied',
@@ -5907,7 +5912,7 @@ def create_file(path: str, content: str) -> str:
                 ),
                 ModelResponse(
                     parts=[TextPart(content='Done!')],
-                    usage=RequestUsage(input_tokens=78, output_tokens=24),
+                    usage=RequestUsage(input_tokens=80, output_tokens=24),
                     model_name='function:model_function:',
                     timestamp=IsDatetime(),
                     run_id=IsStr(),

From 201d7f62c87eb6a05fb1d540003d0c98fa667874 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Fri, 12 Dec 2025 20:57:48 +0530
Subject: [PATCH 63/98] coverage for prompted output

---
 tests/test_agent_output_schemas.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/tests/test_agent_output_schemas.py b/tests/test_agent_output_schemas.py
index 5c63343126..db263309e0 100644
--- a/tests/test_agent_output_schemas.py
+++ b/tests/test_agent_output_schemas.py
@@ -433,6 +433,28 @@ async def test_deferred_output_json_schema():
         }
     )
 
+
+def test_build_instructions_appends_schema_placeholder():
+    """Test that build_instructions appends {schema} when template doesn't contain it."""
+    from pydantic_ai._output import OutputObjectDefinition, PromptedOutputSchema
+    from pydantic_ai.prompt_templates import DEFAULT_PROMPT_TEMPLATES
+
+    object_def = OutputObjectDefinition(
+        json_schema={'type': 'object', 'properties': {'name': {'type': 'string'}}},
+        name='TestOutput',
+        description='A test output',
+    )
+    # Template without {schema} - should append it automatically
+
+    result = PromptedOutputSchema.build_instructions(DEFAULT_PROMPT_TEMPLATES.prompted_output_template, object_def)
+    assert result == snapshot("""\
+
+Always respond with a JSON object that's compatible with this schema:
+
+{"type": "object", "properties": {"name": {"type": "string"}}, "title": "TestOutput", "description": "A test output"}
+
+Don't include any text or Markdown fencing before or after.
+""") # special case of only BinaryImage and DeferredToolRequests agent = Agent('test', output_type=[BinaryImage, DeferredToolRequests]) assert agent.output_json_schema() == snapshot( From cdc477dbe056ea7598049243d58e8d0f6e0071ea Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 23:33:52 +0530 Subject: [PATCH 64/98] cleanup --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 12 +++---- .../pydantic_ai/prompt_templates.py | 31 ++++++++++--------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 549c49e5e0..4851e05969 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -522,7 +522,7 @@ async def _prepare_request( prompt_templates = ctx.deps.prompt_templates - message_history = _apply_prompt_templates(message_history, prompt_templates, run_context) + message_history = _apply_prompt_templates_to_message_history(message_history, prompt_templates, run_context) ctx.state.message_history[:] = message_history @@ -1391,16 +1391,16 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess return clean_messages -def _apply_prompt_templates( +def _apply_prompt_templates_to_message_history( messages: list[_messages.ModelMessage], prompt_templates: _prompt_templates.PromptTemplates, ctx: RunContext[Any] ) -> list[_messages.ModelMessage]: - messages_template_applied: list[_messages.ModelMessage] = [] + messages_with_templates_applied: list[_messages.ModelMessage] = [] for msg in messages: if isinstance(msg, _messages.ModelRequest): parts_template_applied = [prompt_templates.apply_template(part, ctx) for part in msg.parts] - messages_template_applied.append(replace(msg, parts=parts_template_applied)) + messages_with_templates_applied.append(replace(msg, parts=parts_template_applied)) else: - messages_template_applied.append(msg) + messages_with_templates_applied.append(msg) - return messages_template_applied + return messages_with_templates_applied \ No newline at end of file diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 510407542f..2c4f30d6a2 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -83,42 +83,43 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) return message_part # Returns the original message if no template is applied def _get_template_for_retry( - self, message: RetryPromptPart + self, message_part: RetryPromptPart ) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]: - if isinstance(message.content, str): - if message.tool_name is None: - return self.model_retry_string_no_tool + template: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = self.default_model_retry + if isinstance(message_part.content, str): + if message_part.tool_name is None: + template = self.model_retry_string_no_tool else: - return self.model_retry_string_tool + template = self.model_retry_string_tool else: - return self.validation_errors_retry - + template = self.validation_errors_retry + + return template def _apply_retry_tempelate( self, - message: RetryPromptPart, + message_part: RetryPromptPart, ctx: _RunContext[Any], template: str | Callable[[RetryPromptPart, _RunContext[Any]], str], ) -> RetryPromptPart: if isinstance(template, str): - return replace(message, retry_message=template) + message_part = 
replace(message_part, retry_message=template) else: - return replace(message, retry_message=template(message, ctx)) + message_part = replace(message_part, retry_message=template(message_part, ctx)) + + return message_part def _apply_tool_template( self, - message: ToolReturnPart, + message_part: ToolReturnPart, ctx: _RunContext[Any], template: str | Callable[[ToolReturnPart, _RunContext[Any]], str], ) -> ToolReturnPart: - message_part: ToolReturnPart = message - if isinstance(template, str): message_part = replace(message_part, content=template) else: - message_part = replace(message_part, content=template(message, ctx)) + message_part = replace(message_part, content=template(message_part, ctx)) return message_part - def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str | None: """Get the prompted output template for the given output schema.""" from ._output import PromptedOutputSchema From 81755bb22774fe84e73d6c3a7fad6794a5a42768 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Fri, 12 Dec 2025 23:35:32 +0530 Subject: [PATCH 65/98] lint cleanup, skeptical about cov after refactor --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 2 +- pydantic_ai_slim/pydantic_ai/prompt_templates.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 4851e05969..2149c1ffb2 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -1403,4 +1403,4 @@ def _apply_prompt_templates_to_message_history( else: messages_with_templates_applied.append(msg) - return messages_with_templates_applied \ No newline at end of file + return messages_with_templates_applied diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py index 2c4f30d6a2..2d1d960f4a 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py @@ -93,8 +93,9 @@ def _get_template_for_retry( template = self.model_retry_string_tool else: template = self.validation_errors_retry - + return template + def _apply_retry_tempelate( self, message_part: RetryPromptPart, @@ -105,7 +106,7 @@ def _apply_retry_tempelate( message_part = replace(message_part, retry_message=template) else: message_part = replace(message_part, retry_message=template(message_part, ctx)) - + return message_part def _apply_tool_template( @@ -120,6 +121,7 @@ def _apply_tool_template( else: message_part = replace(message_part, content=template(message_part, ctx)) return message_part + def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str | None: """Get the prompted output template for the given output schema.""" from ._output import PromptedOutputSchema From 7df0a25ba1169f799fb27bd56e2e38e1fa4f2a8a Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 00:00:14 +0530 Subject: [PATCH 66/98] coverage --- tests/test_agent_output_schemas.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/tests/test_agent_output_schemas.py b/tests/test_agent_output_schemas.py index db263309e0..c187a9eb7f 100644 --- a/tests/test_agent_output_schemas.py +++ b/tests/test_agent_output_schemas.py @@ -437,24 +437,18 @@ async def test_deferred_output_json_schema(): def test_build_instructions_appends_schema_placeholder(): """Test that build_instructions appends {schema} when template doesn't contain it.""" from pydantic_ai._output import 
OutputObjectDefinition, PromptedOutputSchema
-    from pydantic_ai.prompt_templates import DEFAULT_PROMPT_TEMPLATES
 
     object_def = OutputObjectDefinition(
         json_schema={'type': 'object', 'properties': {'name': {'type': 'string'}}},
         name='TestOutput',
         description='A test output',
     )
-    # Template without {schema} - should append it automatically
+    template_without_schema = 'Please respond with JSON.'
 
-    result = PromptedOutputSchema.build_instructions(DEFAULT_PROMPT_TEMPLATES.prompted_output_template, object_def)
-    assert result == snapshot("""\
-
-Always respond with a JSON object that's compatible with this schema:
-
-{"type": "object", "properties": {"name": {"type": "string"}}, "title": "TestOutput", "description": "A test output"}
-
-Don't include any text or Markdown fencing before or after.
-""")
+    result = PromptedOutputSchema.build_instructions(template_without_schema, object_def)
+    assert result == snapshot(
+        'Please respond with JSON.\n\n{"type": "object", "properties": {"name": {"type": "string"}}, "title": "TestOutput", "description": "A test output"}'
+    )
 
     # special case of only BinaryImage and DeferredToolRequests
     agent = Agent('test', output_type=[BinaryImage, DeferredToolRequests])
     assert agent.output_json_schema() == snapshot(

From 165e79514cfc6f5c8378b115e93551cd7a72a39b Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Sat, 13 Dec 2025 09:05:30 +0530
Subject: [PATCH 67/98] adding comment

---
 pydantic_ai_slim/pydantic_ai/prompt_templates.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
index 2d1d960f4a..eab8f08f62 100644
--- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py
+++ b/pydantic_ai_slim/pydantic_ai/prompt_templates.py
@@ -86,6 +86,8 @@ def _get_template_for_retry(
         self, message_part: RetryPromptPart
     ) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]:
         template: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = self.default_model_retry
+        # This is based on the RetryPromptPart.model_response() implementation
+        # We follow the same structure here to populate the correct template
         if isinstance(message_part.content, str):
             if message_part.tool_name is None:
                 template = self.model_retry_string_no_tool

From f39cc66070cb0f52ba0be8f198e8f4bce64b6da1 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Sat, 13 Dec 2025 10:58:17 +0530
Subject: [PATCH 68/98] Add PromptConfig, composing templates inside
 PromptConfig; a ToolConfig can be added on top of it as well (WIP)

---
 pydantic_ai_slim/pydantic_ai/__init__.py      |  5 +-
 pydantic_ai_slim/pydantic_ai/_agent_graph.py  | 28 +++---
 .../pydantic_ai/agent/__init__.py             | 65 +++++++------
 .../pydantic_ai/agent/abstract.py             | 68 +++++++-------
 pydantic_ai_slim/pydantic_ai/agent/wrapper.py | 19 ++--
 .../pydantic_ai/durable_exec/dbos/_agent.py   | 58 ++++++------
 .../durable_exec/prefect/_agent.py            | 58 ++++++------
 .../durable_exec/temporal/_agent.py           | 58 ++++++------
 pydantic_ai_slim/pydantic_ai/messages.py      |  4 +-
 .../{prompt_templates.py => prompt_config.py} | 94 +++++++++++++++++--
 pydantic_ai_slim/pydantic_ai/tools.py         |  4 +-
 tests/test_agent.py                           | 22 ++---
 12 files changed, 285 insertions(+), 198 deletions(-)
 rename pydantic_ai_slim/pydantic_ai/{prompt_templates.py => prompt_config.py} (61%)

diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py
index 95e111f5f1..344128ec37 100644
--- a/pydantic_ai_slim/pydantic_ai/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/__init__.py
@@ -94,7 +94,7 @@
ModelProfile, ModelProfileSpec, ) -from .prompt_templates import PromptTemplates +from .prompt_config import PromptConfig, PromptTemplates from .run import AgentRun, AgentRunResult, AgentRunResultEvent from .settings import ModelSettings from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied @@ -230,7 +230,8 @@ 'PromptedOutput', 'TextOutput', 'StructuredDict', - # prompt_templates + # prompt_config + 'PromptConfig', 'PromptTemplates', # format_prompt 'format_as_xml', diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 2149c1ffb2..97c00fb728 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -31,7 +31,7 @@ exceptions, messages as _messages, models, - prompt_templates as _prompt_templates, + prompt_config as _prompt_config, result, usage as _usage, ) @@ -147,8 +147,8 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): model: models.Model model_settings: ModelSettings | None - prompt_templates: _prompt_templates.PromptTemplates = dataclasses.field( - default_factory=lambda: _prompt_templates.DEFAULT_PROMPT_TEMPLATES + prompt_config: _prompt_config.PromptConfig = dataclasses.field( + default_factory=lambda: _prompt_config.DEFAULT_PROMPT_CONFIG ) usage_limits: _usage.UsageLimits max_result_retries: int @@ -396,8 +396,8 @@ async def _prepare_request_parameters( """Build tools and create an agent model.""" output_schema = ctx.deps.output_schema - prompt_templates = ctx.deps.prompt_templates - prompted_output_template = prompt_templates.get_prompted_output_template(output_schema) + prompt_config = ctx.deps.prompt_config + prompted_output_template = prompt_config.templates.get_prompted_output_template(output_schema) function_tools: list[ToolDefinition] = [] output_tools: list[ToolDefinition] = [] @@ -520,9 +520,9 @@ async def _prepare_request( # Update the new message index to ensure `result.new_messages()` returns the correct messages ctx.deps.new_message_index -= len(original_history) - len(message_history) - prompt_templates = ctx.deps.prompt_templates + prompt_config = ctx.deps.prompt_config - message_history = _apply_prompt_templates_to_message_history(message_history, prompt_templates, run_context) + message_history = _apply_prompt_templates_to_message_history(message_history, prompt_config.templates, run_context) ctx.state.message_history[:] = message_history @@ -802,7 +802,7 @@ def _handle_final_result( if tool_responses: run_ctx = build_run_context(ctx) - tool_responses = [ctx.deps.prompt_templates.apply_template(part, run_ctx) for part in tool_responses] + tool_responses = [ctx.deps.prompt_config.templates.apply_template(part, run_ctx) for part in tool_responses] # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: @@ -890,7 +890,7 @@ async def process_tool_calls( # noqa: C901 if final_result.tool_call_id == call.tool_call_id: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.final_result_processed, + content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.final_result_processed, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -898,7 +898,7 @@ async def process_tool_calls( # noqa: C901 yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, - 
content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.output_tool_not_executed, + content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.output_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='output-tool-not-executed', ) @@ -923,7 +923,7 @@ async def process_tool_calls( # noqa: C901 else: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.final_result_processed, + content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.final_result_processed, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -937,7 +937,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.function_tool_not_executed, + content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.function_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) @@ -996,7 +996,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_templates.DEFAULT_PROMPT_TEMPLATES.function_tool_not_executed, + content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.function_tool_not_executed, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) @@ -1392,7 +1392,7 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess def _apply_prompt_templates_to_message_history( - messages: list[_messages.ModelMessage], prompt_templates: _prompt_templates.PromptTemplates, ctx: RunContext[Any] + messages: list[_messages.ModelMessage], prompt_templates: _prompt_config.PromptTemplates, ctx: RunContext[Any] ) -> list[_messages.ModelMessage]: messages_with_templates_applied: list[_messages.ModelMessage] = [] diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 95e2c1b237..464e3344d0 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -24,7 +24,7 @@ exceptions, messages as _messages, models, - prompt_templates as _prompt_templates, + prompt_config as _prompt_config, usage as _usage, ) from .._agent_graph import ( @@ -126,8 +126,8 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]): be merged with this value, with the runtime argument taking priority. 
""" - prompt_templates: _prompt_templates.PromptTemplates | None - """Optional prompt templates used to customize the system-injected messages for this agent.""" + prompt_config: _prompt_config.PromptConfig | None + """Optional prompt configuration used to customize the system-injected messages for this agent.""" _output_type: OutputSpec[OutputDataT] @@ -171,7 +171,7 @@ def __init__( deps_type: type[AgentDepsT] = NoneType, name: str | None = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, retries: int = 1, validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, @@ -224,7 +224,7 @@ def __init__( deps_type: type[AgentDepsT] = NoneType, name: str | None = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, retries: int = 1, validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = None, output_retries: int | None = None, @@ -258,7 +258,7 @@ def __init__( name: The name of the agent, used for logging. If `None`, we try to infer the agent name from the call frame when the agent is first run. model_settings: Optional model request settings to use for this agent's runs, by default. - prompt_templates: Optional prompt templates to customize how system-injected messages + prompt_config: Optional prompt configuration to customize how system-injected messages (like retry prompts or tool return wrappers) are rendered for this agent. retries: The default number of retries to allow for tool calls and output validation, before raising an error. For model request retries, see the [HTTP Request Retries](../retries.md) documentation. 
@@ -303,7 +303,7 @@ def __init__( self._name = name self.end_strategy = end_strategy self.model_settings = model_settings - self.prompt_templates = prompt_templates + self.prompt_config = prompt_config self._output_type = output_type self.instrument = instrument @@ -366,8 +366,8 @@ def __init__( self._override_instructions: ContextVar[ _utils.Option[list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]] ] = ContextVar('_override_instructions', default=None) - self._override_prompt_templates: ContextVar[_utils.Option[_prompt_templates.PromptTemplates]] = ContextVar( - '_override_prompt_templates', default=None + self._override_prompt_config: ContextVar[_utils.Option[_prompt_config.PromptConfig]] = ContextVar( + '_override_prompt_config', default=None ) self._enter_lock = Lock() @@ -436,7 +436,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -456,7 +456,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -476,7 +476,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -553,7 +553,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this specific run, falling back to the agent's defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
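A per-run value, as documented above, takes precedence over the agent-level default; a minimal sketch, assuming `PromptConfig()` is constructible with defaults:

    from pydantic_ai import Agent, PromptConfig

    agent = Agent('test')
    # Per-run prompt_config; when omitted, the run falls back to the agent
    # default and finally to the global DEFAULT_PROMPT_CONFIG.
    result = agent.run_sync('What is 2+2?', prompt_config=PromptConfig())
    print(result.output)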
@@ -605,7 +605,7 @@ async def main(): merged_settings = merge_model_settings(model_used.settings, self.model_settings) model_settings = merge_model_settings(merged_settings, model_settings) usage_limits = usage_limits or _usage.UsageLimits() - prompt_templates = self._get_prompt_templates(prompt_templates) + prompt_config = self._get_prompt_config(prompt_config) instructions_literal, instructions_functions = self._get_instructions(additional_instructions=instructions) @@ -633,7 +633,7 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None: new_message_index=len(message_history) if message_history else 0, model=model_used, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, max_result_retries=self._max_result_retries, end_strategy=self.end_strategy, @@ -768,7 +768,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_config: _prompt_config.PromptConfig | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -782,7 +782,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. - prompt_templates: The prompt templates to use instead of the prompt templates passed to the agent constructor and agent run. + prompt_config: The prompt config to use instead of the prompt config passed to the agent constructor and agent run. """ if _utils.is_set(name): name_token = self._override_name.set(_utils.Some(name)) @@ -815,10 +815,10 @@ def override( else: instructions_token = None - if _utils.is_set(prompt_templates): - prompt_templates_token = self._override_prompt_templates.set(_utils.Some(prompt_templates)) + if _utils.is_set(prompt_config): + prompt_config_token = self._override_prompt_config.set(_utils.Some(prompt_config)) else: - prompt_templates_token = None + prompt_config_token = None try: yield @@ -835,8 +835,8 @@ def override( self._override_tools.reset(tools_token) if instructions_token is not None: self._override_instructions.reset(instructions_token) - if prompt_templates_token is not None: - self._override_prompt_templates.reset(prompt_templates_token) + if prompt_config_token is not None: + self._override_prompt_config.reset(prompt_config_token) @overload def instructions( @@ -1351,19 +1351,19 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: else: return deps - def _get_prompt_templates( - self, prompt_templates: _prompt_templates.PromptTemplates | None - ) -> _prompt_templates.PromptTemplates: - """Get prompt_templates for a run. + def _get_prompt_config( + self, prompt_config: _prompt_config.PromptConfig | None + ) -> _prompt_config.PromptConfig: + """Get prompt_config for a run. 
- If we've overridden prompt_templates via `_override_prompt_templates`, use that,
-        otherwise use the prompt_templates passed to the call, falling back to the agent default,
+        If we've overridden prompt_config via `_override_prompt_config`, use that,
+        otherwise use the prompt_config passed to the call, falling back to the agent default,
         and finally falling back to the global default.
         """
-        if some_prompt_templates := self._override_prompt_templates.get():
-            return some_prompt_templates.value
+        if some_prompt_config := self._override_prompt_config.get():
+            return some_prompt_config.value
         else:
-            return prompt_templates or self.prompt_templates or _prompt_templates.DEFAULT_PROMPT_TEMPLATES
+            return prompt_config or self.prompt_config or _prompt_config.DEFAULT_PROMPT_CONFIG
 
     def _normalize_instructions(
         self,
@@ -1425,6 +1425,9 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools
                 return toolset
 
         toolset = toolset.visit_and_replace(copy_dynamic_toolsets)
+        prompt_config = self._get_prompt_config(None)
+
+        # Check if the prompt_config has any tool descriptions to prepare tools
         if self._prepare_tools:
             toolset = PreparedToolset(toolset, self._prepare_tools)
 
diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py
index 0125538187..dfae29f20d 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py
@@ -21,7 +21,7 @@
     exceptions,
     messages as _messages,
     models,
-    prompt_templates as _prompt_templates,
+    prompt_config as _prompt_config,
     result,
     usage as _usage,
 )
@@ -161,7 +161,7 @@ async def run(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -182,7 +182,7 @@ async def run(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -202,7 +202,7 @@ async def run(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -237,7 +237,7 @@ async def main():
             instructions: Optional additional instructions to use for this run.
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
-            prompt_templates: Optional prompt templates to override how system-generated parts are phrased for
+            prompt_config: Optional prompt configuration to override how system-generated parts are phrased for
                 this specific run, falling back to the agent defaults if omitted.
             usage_limits: Optional limits on model request count or token usage.
             usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
@@ -263,7 +263,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, toolsets=toolsets, @@ -291,7 +291,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -312,7 +312,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -332,7 +332,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -366,7 +366,7 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
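The same parameter threads through the abstract runner methods here; an override set via `Agent.override` wins over both the per-run argument and the agent default, per the fallback chain in `_get_prompt_config` above. A sketch under the same assumptions as before:

    from pydantic_ai import Agent, PromptConfig

    agent = Agent('test', prompt_config=PromptConfig())
    with agent.override(prompt_config=PromptConfig()):
        # The overridden config wins over the agent default and over any
        # prompt_config passed to the run call below.
        agent.run_sync('hello', prompt_config=PromptConfig())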
@@ -391,7 +391,7 @@ def run_sync( instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=False, @@ -413,7 +413,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -434,7 +434,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -455,7 +455,7 @@ async def run_stream( # noqa: C901 instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -497,7 +497,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
@@ -528,7 +528,7 @@ async def main(): deps=deps, instructions=instructions, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=False, @@ -651,7 +651,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -671,7 +671,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -690,7 +690,7 @@ def run_stream_sync( model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -734,7 +734,7 @@ def main(): model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
@@ -760,7 +760,7 @@ async def _consume_stream(): model=model, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -785,7 +785,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -805,7 +805,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -824,7 +824,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -875,7 +875,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
@@ -902,7 +902,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, toolsets=toolsets, @@ -920,7 +920,7 @@ async def _run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -947,7 +947,7 @@ async def run_agent() -> AgentRunResult[Any]: instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=False, @@ -977,7 +977,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -997,7 +997,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -1018,7 +1018,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -1095,7 +1095,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this specific run, falling back to the agent defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. @@ -1120,7 +1120,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_config: _prompt_config.PromptConfig | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -1134,7 +1134,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. 
instructions: The instructions to use instead of the instructions registered with the agent. - prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. + prompt_config: The prompt configuration to use instead of the prompt config registered with the agent. """ raise NotImplementedError yield diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index 448438171c..7dc0d63e92 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -8,7 +8,7 @@ _utils, messages as _messages, models, - prompt_templates as _prompt_templates, + prompt_config as _prompt_config, usage as _usage, ) from .._json_schema import JsonSchema @@ -85,7 +85,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -105,7 +105,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -125,7 +125,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -202,7 +202,8 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for + this specific run, falling back to the agent's defaults if omitted. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -221,7 +222,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -240,7 +241,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_config: _prompt_config.PromptConfig | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. 
@@ -254,7 +255,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. - prompt_templates: The prompt templates to use instead of the prompt templates passed to the agent constructor and agent run. + prompt_config: The prompt configuration to use instead of the prompt config passed to the agent constructor and agent run. """ with self.wrapped.override( name=name, @@ -263,6 +264,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, - prompt_templates=prompt_templates, + prompt_config=prompt_config, ): yield diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index af7d945cd7..4eca4619b8 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -13,7 +13,7 @@ _utils, messages as _messages, models, - prompt_templates as _prompt_templates, + prompt_config as _prompt_config, usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent @@ -137,7 +137,7 @@ async def wrapped_run_workflow( deps: AgentDepsT, model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -155,7 +155,7 @@ async def wrapped_run_workflow( instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -180,7 +180,7 @@ def wrapped_run_sync_workflow( model_settings: ModelSettings | None = None, instructions: Instructions[AgentDepsT] = None, usage_limits: _usage.UsageLimits | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -199,7 +199,7 @@ def wrapped_run_sync_workflow( deps=deps, model_settings=model_settings, usage_limits=usage_limits, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage=usage, infer_name=infer_name, toolsets=toolsets, @@ -273,7 +273,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -294,7 +294,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -314,7 +314,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT 
= None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -351,7 +351,7 @@ async def main():
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
             usage_limits: Optional limits on model request count or token usage.
-            prompt_templates: Optional prompt templates to override how system-generated parts are phrased for
+            prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run.
             usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
             infer_name: Whether to try to infer the agent name from the call frame if it's not set.
             toolsets: Optional additional toolsets for this run.
@@ -374,7 +374,7 @@ async def main():
             instructions=instructions,
             deps=deps,
             model_settings=model_settings,
-            prompt_templates=prompt_templates,
+            prompt_config=prompt_config,
             usage_limits=usage_limits,
             usage=usage,
             infer_name=infer_name,
@@ -396,7 +396,7 @@ def run_sync(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -417,7 +417,7 @@ def run_sync(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -437,7 +437,7 @@ def run_sync(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -472,7 +472,7 @@ def run_sync(
             instructions: Optional additional instructions to use for this run.
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
-            prompt_templates: Optional prompt templates to override how system-generated parts are phrased for
+            prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run.
             usage_limits: Optional limits on model request count or token usage.
             usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
             infer_name: Whether to try to infer the agent name from the call frame if it's not set.
@@ -496,7 +496,7 @@ def run_sync(
             instructions=instructions,
             deps=deps,
             model_settings=model_settings,
-            prompt_templates=prompt_templates,
+            prompt_config=prompt_config,
             usage_limits=usage_limits,
             usage=usage,
             infer_name=infer_name,
@@ -518,7 +518,7 @@ def run_stream(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -539,7 +539,7 @@ def run_stream(
         deps: AgentDepsT = None,
         instructions: Instructions[AgentDepsT] = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -639,7 +639,7 @@ def run_stream_events(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -659,7 +659,7 @@ def run_stream_events(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -678,7 +678,7 @@ def run_stream_events(
         instructions: Instructions[AgentDepsT] = None,
         deps: AgentDepsT = None,
         model_settings: ModelSettings | None = None,
-        prompt_templates: _prompt_templates.PromptTemplates | None = None,
+        prompt_config: _prompt_config.PromptConfig | None = None,
         usage_limits: _usage.UsageLimits | None = None,
         usage: _usage.RunUsage | None = None,
         infer_name: bool = True,
@@ -729,7 +729,7 @@ async def main():
             instructions: Optional additional instructions to use for this run.
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
-            prompt_templates: Optional prompt templates to override how system-generated parts are phrased for
+            prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run.
             usage_limits: Optional limits on model request count or token usage.
             usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
             infer_name: Whether to try to infer the agent name from the call frame if it's not set.
@@ -757,7 +757,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -778,7 +778,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -799,7 +799,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -877,7 +877,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -902,7 +902,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -922,7 +922,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_config: _prompt_config.PromptConfig | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -936,7 +936,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. - prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. + prompt_config: The prompt configuration to use instead of the prompt config registered with the agent.
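A sketch of the `override()` change above, mirroring the `agent.override(prompt_config=...)` usage exercised in `tests/test_agent.py` later in this patch:

```python
# Sketch: temporarily overriding an agent's prompt configuration, e.g. in tests.
from pydantic_ai import Agent, PromptConfig, PromptTemplates

agent = Agent('openai:gpt-4o')

with agent.override(
    prompt_config=PromptConfig(
        templates=PromptTemplates(validation_errors_retry='Custom retry message override'),
    ),
):
    # Any run started inside this block sees the overridden prompt config.
    result = agent.run_sync('Hello')
```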
""" if _utils.is_set(model) and not isinstance(model, (DBOSModel)): raise UserError( @@ -950,6 +950,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, - prompt_templates=prompt_templates, + prompt_config=prompt_config, ): yield diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 77c049b609..711d4ae247 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -16,7 +16,7 @@ _utils, messages as _messages, models, - prompt_templates as _prompt_templates, + prompt_config as _prompt_config, usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent @@ -185,7 +185,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -206,7 +206,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -226,7 +226,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -262,7 +262,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
@@ -289,7 +289,7 @@ async def wrapped_run_flow() -> AgentRunResult[Any]: instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -314,7 +314,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -335,7 +335,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -355,7 +355,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -390,7 +390,7 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
@@ -419,7 +419,7 @@ def wrapped_run_sync_flow() -> AgentRunResult[Any]: instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -445,7 +445,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -466,7 +466,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -487,7 +487,7 @@ async def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -520,7 +520,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
@@ -546,7 +546,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -569,7 +569,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -589,7 +589,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -608,7 +608,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -659,7 +659,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
@@ -685,7 +685,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -705,7 +705,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -725,7 +725,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -745,7 +745,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -822,7 +822,7 @@ async def main(): deps: Optional dependencies to use for this run. instructions: Optional additional instructions to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for this run. + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -847,7 +847,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -865,7 +865,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_config: _prompt_config.PromptConfig | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions. @@ -879,7 +879,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. - prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. + prompt_config: The prompt configuration to use instead of the prompt config registered with the agent. 
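A hypothetical sketch of the Prefect wrapper with the renamed argument; the `PrefectAgent` name is assumed from this module's path (`pydantic_ai.durable_exec.prefect`) and is not shown in the hunks above.

```python
# Sketch: the durable wrapper forwards prompt_config to the wrapped agent's run.
from pydantic_ai import Agent, PromptConfig, PromptTemplates
from pydantic_ai.durable_exec.prefect import PrefectAgent  # assumed import path

agent = PrefectAgent(Agent('openai:gpt-4o', name='durable_agent'))

result = agent.run_sync(
    'Hello',
    prompt_config=PromptConfig(templates=PromptTemplates(final_result_processed='Result received.')),
)
```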
""" if _utils.is_set(model) and not isinstance(model, PrefectModel): raise UserError( @@ -893,6 +893,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, - prompt_templates=prompt_templates, + prompt_config=prompt_config, ): yield diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index e1b0e627e5..1f8b5aace0 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -21,7 +21,7 @@ _utils, messages as _messages, models, - prompt_templates as _prompt_templates, + prompt_config as _prompt_config, usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent @@ -268,7 +268,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -289,7 +289,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -309,7 +309,7 @@ async def run( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -345,7 +345,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
@@ -371,7 +371,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -393,7 +393,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -415,7 +415,7 @@ def run_sync( deps: AgentDepsT = None, model_settings: ModelSettings | None = None, usage_limits: _usage.UsageLimits | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, @@ -434,7 +434,7 @@ def run_sync( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -469,7 +469,7 @@ def run_sync( instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set.
@@ -494,7 +494,7 @@ def run_sync( instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -516,7 +516,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -537,7 +537,7 @@ def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -558,7 +558,7 @@ async def run_stream( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -591,7 +591,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set.
@@ -617,7 +617,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -640,7 +640,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -660,7 +660,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -679,7 +679,7 @@ def run_stream_events( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -730,7 +730,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set.
@@ -756,7 +756,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -776,7 +776,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -797,7 +797,7 @@ def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -818,7 +818,7 @@ async def iter( instructions: Instructions[AgentDepsT] = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, - prompt_templates: _prompt_templates.PromptTemplates | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, usage_limits: _usage.UsageLimits | None = None, usage: _usage.RunUsage | None = None, infer_name: bool = True, @@ -896,7 +896,7 @@ async def main(): instructions: Optional additional instructions to use for this run. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. - prompt_templates: Optional prompt templates to override how system-generated parts are phrased for + prompt_config: Optional prompt configuration to override how system-generated parts are phrased for this run. usage_limits: Optional limits on model request count or token usage. usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. @@ -931,7 +931,7 @@ async def main(): instructions=instructions, deps=deps, model_settings=model_settings, - prompt_templates=prompt_templates, + prompt_config=prompt_config, usage_limits=usage_limits, usage=usage, infer_name=infer_name, @@ -951,7 +951,7 @@ def override( toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, - prompt_templates: _prompt_templates.PromptTemplates | _utils.Unset = _utils.UNSET, + prompt_config: _prompt_config.PromptConfig | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions. @@ -965,7 +965,7 @@ def override( toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. instructions: The instructions to use instead of the instructions registered with the agent. - prompt_templates: The prompt templates to use instead of the prompt templates registered with the agent. + prompt_config: The prompt configuration to use instead of the prompt config registered with the agent.
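Likewise for the Temporal wrapper; `TemporalAgent` is assumed from this module's path (`pydantic_ai.durable_exec.temporal`). As the guard right after this docstring shows, model overrides are rejected inside a running workflow, so a sketch like this belongs outside workflow code (e.g. in tests).

```python
# Sketch: overriding prompt_config on the Temporal wrapper outside a workflow.
from pydantic_ai import Agent, PromptConfig, PromptTemplates
from pydantic_ai.durable_exec.temporal import TemporalAgent  # assumed import path

agent = TemporalAgent(Agent('openai:gpt-4o', name='temporal_agent'))

with agent.override(
    prompt_config=PromptConfig(templates=PromptTemplates(tool_call_denied='This tool call was denied.')),
):
    ...  # runs started here see the overridden templates
```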
""" if workflow.in_workflow(): if _utils.is_set(model): @@ -988,6 +988,6 @@ def override( toolsets=toolsets, tools=tools, instructions=instructions, - prompt_templates=prompt_templates, + prompt_config=prompt_config, ): yield diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 2763fc5c4a..ab9249db3a 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -917,9 +917,9 @@ class BuiltinToolReturnPart(BaseToolReturnPart): def _get_default_model_retry_message() -> str: - from .prompt_templates import DEFAULT_PROMPT_TEMPLATES + from .prompt_config import DEFAULT_PROMPT_CONFIG - return cast(str, DEFAULT_PROMPT_TEMPLATES.default_model_retry) + return cast(str, DEFAULT_PROMPT_CONFIG.templates.default_model_retry) @dataclass(repr=False) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_templates.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py similarity index 61% rename from pydantic_ai_slim/pydantic_ai/prompt_templates.py rename to pydantic_ai_slim/pydantic_ai/prompt_config.py index eab8f08f62..e0fc81eb63 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_templates.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -1,7 +1,7 @@ from __future__ import annotations as _annotations from collections.abc import Callable -from dataclasses import dataclass, replace +from dataclasses import dataclass, field, replace from textwrap import dedent from typing import TYPE_CHECKING, Any @@ -15,10 +15,33 @@ @dataclass class PromptTemplates: - """Templates for customizing messages that Pydantic AI sends to models. + """Templates for customizing system-generated messages that Pydantic AI sends to models. - Each template can be a static string or a callable that receives the message part and - [`RunContext`][pydantic_ai.RunContext] and returns a string. + Each template can be either: + - A static string that replaces the default message + - A callable that receives the message part and [`RunContext`][pydantic_ai.RunContext] + and returns a dynamically generated string + + These templates are used within [`PromptConfig`][pydantic_ai.PromptConfig] to customize + retry prompts, tool return confirmations, validation error messages, and more. + + Example: + ```python + from pydantic_ai import Agent, PromptConfig, PromptTemplates + + # Using static strings + templates = PromptTemplates( + validation_errors_retry='Please fix the validation errors.', + final_result_processed='Done!', + ) + + # Using callable for dynamic messages + templates = PromptTemplates( + validation_errors_retry=lambda part, ctx: f'Retry #{ctx.retries}: Fix the errors.', + ) + + agent = Agent('openai:gpt-4o', prompt_config=PromptConfig(templates=templates)) + ``` """ final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Final result processed.' 
@@ -74,7 +97,7 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed) elif message_part.return_kind == 'tool-denied': # The content may already have a custom message from ToolDenied in which case we should not override it - if self.tool_call_denied != DEFAULT_PROMPT_TEMPLATES.tool_call_denied: + if self.tool_call_denied != DEFAULT_PROMPT_CONFIG.templates.tool_call_denied: return self._apply_tool_template(message_part, ctx, self.tool_call_denied) return message_part elif isinstance(message_part, RetryPromptPart): @@ -134,4 +157,63 @@ def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str return self.prompted_output_template -DEFAULT_PROMPT_TEMPLATES = PromptTemplates() +@dataclass +class ToolConfig: + """Configuration for customizing the tool descriptions and arguments used by agents.""" + + tool_descriptions: dict[str, str] = field(default_factory=lambda: {}) + """Custom descriptions for tools used by the agent.""" + +@dataclass +class PromptConfig: + """Configuration for customizing all strings and prompts sent to the model by Pydantic AI. + + `PromptConfig` provides a clean, extensible interface for overriding any text that + Pydantic AI sends to the model. This includes: + + - **Prompt Templates**: Messages for retry prompts, tool return confirmations, + validation errors, and other system-generated text via [`PromptTemplates`][pydantic_ai.PromptTemplates]. + - **Tool Configuration**: Tool descriptions, parameter descriptions, and other + tool metadata via [`ToolConfig`][pydantic_ai.ToolConfig], allowing you to override descriptions and arguments for tools at the agent level. + + This allows you to fully customize how your agent communicates with the model + without modifying the underlying tool or agent code. + + Example: + ```python + from pydantic_ai import Agent, PromptConfig, PromptTemplates + + agent = Agent( + 'openai:gpt-4o', + prompt_config=PromptConfig( + templates=PromptTemplates( + validation_errors_retry='Please correct the errors and try again.', + final_result_processed='Result received successfully.', + ), + ), + ) + ``` + + Attributes: + templates: Templates for customizing system-generated messages like retry prompts, + tool return confirmations, and validation error messages. + """ + + templates: PromptTemplates = field(default_factory=PromptTemplates) + """Templates for customizing system-generated messages sent to the model. + + See [`PromptTemplates`][pydantic_ai.PromptTemplates] for available template options. + """ + + tool_config: ToolConfig = field(default_factory=ToolConfig) + """Configuration for customizing tool descriptions and metadata. + See [`ToolConfig`][pydantic_ai.ToolConfig] for available configuration options. + """ + + +DEFAULT_PROMPT_CONFIG = PromptConfig() +"""The default prompt configuration used when no custom configuration is provided. + +This uses the default [`PromptTemplates`][pydantic_ai.PromptTemplates] with sensible +defaults for all system-generated messages.
+""" diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index 738a47679c..fc4db527f3 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -14,7 +14,7 @@ from .builtin_tools import AbstractBuiltinTool from .exceptions import ModelRetry from .messages import RetryPromptPart, ToolCallPart, ToolReturn -from .prompt_templates import DEFAULT_PROMPT_TEMPLATES +from .prompt_config import DEFAULT_PROMPT_CONFIG __all__ = ( 'AgentDepsT', @@ -177,7 +177,7 @@ class ToolApproved: class ToolDenied: """Indicates that a tool call has been denied and that a denial message should be returned to the model.""" - message: str = cast(str, DEFAULT_PROMPT_TEMPLATES.tool_call_denied) + message: str = cast(str, DEFAULT_PROMPT_CONFIG.templates.tool_call_denied) """The message to return to the model.""" _: KW_ONLY diff --git a/tests/test_agent.py b/tests/test_agent.py index 565b824c06..02826604f1 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -66,7 +66,7 @@ from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel from pydantic_ai.models.test import TestModel from pydantic_ai.output import OutputObjectDefinition, StructuredDict, ToolOutput -from pydantic_ai.prompt_templates import PromptTemplates +from pydantic_ai.prompt_config import PromptConfig, PromptTemplates from pydantic_ai.result import RunUsage from pydantic_ai.settings import ModelSettings from pydantic_ai.tools import DeferredToolRequests, DeferredToolResults, ToolDefinition, ToolDenied @@ -235,7 +235,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert result.all_messages_json().startswith(b'[{"parts":[{"content":"Hello",') -def test_prompt_templates_callable(): +def test_prompt_config_callable(): """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" def my_function_tool() -> str: # pragma: no cover @@ -260,12 +260,12 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse agent = Agent( FunctionModel(return_model), output_type=Foo, - prompt_templates=PromptTemplates( + prompt_config=PromptConfig(templates=PromptTemplates( validation_errors_retry=lambda part, ctx: 'Please fix these validation errors and try again.', final_result_processed=lambda part, ctx: f'Custom final result {part.content}', output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', - ), + )), ) agent.tool_plain(my_function_tool) @@ -369,7 +369,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ) -def test_prompt_templates_string_and_override_prompt_templates(): +def test_prompt_config_string_and_override_prompt_config(): """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" def my_function_tool() -> str: # pragma: no cover @@ -394,12 +394,12 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse agent = Agent( FunctionModel(return_model), output_type=Foo, - prompt_templates=PromptTemplates( + prompt_config=PromptConfig(templates=PromptTemplates( validation_errors_retry='Custom retry message', final_result_processed='Custom final result', output_tool_not_executed='Custom output not executed:', 
function_tool_not_executed='Custom function not executed', - ), + )), ) agent.tool_plain(my_function_tool) @@ -500,8 +500,8 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ] ) - # Verify prompt_templates can be overridden - with agent.override(prompt_templates=PromptTemplates(validation_errors_retry='Custom retry message override')): + # Verify prompt_config can be overridden + with agent.override(prompt_config=PromptConfig(templates=PromptTemplates(validation_errors_retry='Custom retry message override'))): result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} retry_request = result.all_messages()[2] @@ -5741,11 +5741,11 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon model = FunctionModel(model_function) - # Using prompt_templates without setting tool_call_denied to cover line 78 in prompt_templates.py + # Using prompt_config without setting tool_call_denied to cover line 78 in prompt_config.py agent = Agent( model, output_type=[str, DeferredToolRequests], - prompt_templates=PromptTemplates(tool_call_denied='Tool call denied custom message.'), + prompt_config=PromptConfig(templates=PromptTemplates(tool_call_denied='Tool call denied custom message.')), ) @agent.tool_plain(requires_approval=True) From 90bfab24169f86f552893f9ee4155f6c6b220288 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 11:16:42 +0530 Subject: [PATCH 69/98] Revamp PreparedToolset to allow using tool_config as well --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 4 ++- .../pydantic_ai/agent/__init__.py | 12 +++---- pydantic_ai_slim/pydantic_ai/prompt_config.py | 1 + .../pydantic_ai/toolsets/prepared.py | 31 +++++++++++++++++- tests/test_agent.py | 32 +++++++++++-------- 5 files changed, 57 insertions(+), 23 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 97c00fb728..60b12a1e8b 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -522,7 +522,9 @@ async def _prepare_request( prompt_config = ctx.deps.prompt_config - message_history = _apply_prompt_templates_to_message_history(message_history, prompt_config.templates, run_context) + message_history = _apply_prompt_templates_to_message_history( + message_history, prompt_config.templates, run_context + ) ctx.state.message_history[:] = message_history diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 464e3344d0..3e7bd09950 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1351,9 +1351,7 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: else: return deps - def _get_prompt_config( - self, prompt_config: _prompt_config.PromptConfig | None - ) -> _prompt_config.PromptConfig: + def _get_prompt_config(self, prompt_config: _prompt_config.PromptConfig | None) -> _prompt_config.PromptConfig: """Get prompt_config for a run.
If we've overridden prompt_config via `_override_prompt_config`, use that, @@ -1425,17 +1423,15 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools return toolset toolset = toolset.visit_and_replace(copy_dynamic_toolsets) - prompt_config = self._get_prompt_config(None) + tool_config = self._get_prompt_config(None).tool_config # Check if the prompt_config has any tool descriptions to prepare tools - if self._prepare_tools: - toolset = PreparedToolset(toolset, self._prepare_tools) + toolset = PreparedToolset(toolset, self._prepare_tools, tool_config=tool_config) output_toolset = output_toolset if _utils.is_set(output_toolset) else self._output_toolset if output_toolset is not None: - if self._prepare_output_tools: - output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools) + output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools, tool_config=tool_config) toolset = CombinedToolset([output_toolset, toolset]) return toolset diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index e0fc81eb63..6f8b62072b 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -164,6 +164,7 @@ class ToolConfig: tool_descriptions: dict[str, str] = field(default_factory=lambda: {}) """Custom descriptions for tools used by the agent.""" + @dataclass class PromptConfig: """Configuration for customizing all strings and prompts sent to the model by Pydantic AI. diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py index af604d4328..90f50adde3 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py @@ -2,6 +2,8 @@ from dataclasses import dataclass, replace +from pydantic_ai.prompt_config import ToolConfig + from .._run_context import AgentDepsT, RunContext from ..exceptions import UserError from ..tools import ToolsPrepareFunc @@ -16,10 +18,37 @@ class PreparedToolset(WrapperToolset[AgentDepsT]): See [toolset docs](../toolsets.md#preparing-tool-definitions) for more information. 
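A sketch of what this revamp enables: `PreparedToolset` now swaps in any per-tool description found in `tool_config.tool_descriptions`, so a description can be overridden at the agent level without touching the tool itself. The `get_weather` tool and its description below are hypothetical.

```python
# Sketch: overriding a registered tool's description via ToolConfig.
from pydantic_ai import Agent
from pydantic_ai.prompt_config import PromptConfig, ToolConfig

agent = Agent(
    'openai:gpt-4o',
    prompt_config=PromptConfig(
        tool_config=ToolConfig(
            # Key is the registered tool name; the value replaces its description.
            tool_descriptions={'get_weather': 'Look up the current weather for a city.'},
        ),
    ),
)


@agent.tool_plain
def get_weather(city: str) -> str:
    """Docstring-derived description, replaced by the tool_config entry above."""
    return f'Sunny in {city}'
```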
""" - prepare_func: ToolsPrepareFunc[AgentDepsT] + prepare_func: ToolsPrepareFunc[AgentDepsT] | None + tool_config: ToolConfig async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: original_tools = await super().get_tools(ctx) + + tools_after_prepare_func = await self.get_tools_from_prepare_func(original_tools, ctx) + + tools_after_tool_config = await self._get_tools_from_tool_config(tools_after_prepare_func) + + return tools_after_tool_config + + async def _get_tools_from_tool_config( + self, original_tools: dict[str, ToolsetTool[AgentDepsT]] + ) -> dict[str, ToolsetTool[AgentDepsT]]: + tool_descriptions = self.tool_config.tool_descriptions + + for tool_name, description in tool_descriptions.items(): + if tool_name in original_tools: + original_tool = original_tools[tool_name] + updated_tool_def = replace(original_tool.tool_def, description=description) + original_tools[tool_name] = replace(original_tool, tool_def=updated_tool_def) + + return original_tools + + async def get_tools_from_prepare_func( + self, original_tools: dict[str, ToolsetTool[AgentDepsT]], ctx: RunContext[AgentDepsT] + ) -> dict[str, ToolsetTool[AgentDepsT]]: + if self.prepare_func is None: + return original_tools + original_tool_defs = [tool.tool_def for tool in original_tools.values()] prepared_tool_defs_by_name = { tool_def.name: tool_def for tool_def in (await self.prepare_func(ctx, original_tool_defs) or []) diff --git a/tests/test_agent.py b/tests/test_agent.py index 02826604f1..16d4a54577 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -260,12 +260,14 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse agent = Agent( FunctionModel(return_model), output_type=Foo, - prompt_config=PromptConfig(templates=PromptTemplates( - validation_errors_retry=lambda part, ctx: 'Please fix these validation errors and try again.', - final_result_processed=lambda part, ctx: f'Custom final result {part.content}', - output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', - function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', - )), + prompt_config=PromptConfig( + templates=PromptTemplates( + validation_errors_retry=lambda part, ctx: 'Please fix these validation errors and try again.', + final_result_processed=lambda part, ctx: f'Custom final result {part.content}', + output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', + function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', + ) + ), ) agent.tool_plain(my_function_tool) @@ -394,12 +396,14 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse agent = Agent( FunctionModel(return_model), output_type=Foo, - prompt_config=PromptConfig(templates=PromptTemplates( - validation_errors_retry='Custom retry message', - final_result_processed='Custom final result', - output_tool_not_executed='Custom output not executed:', - function_tool_not_executed='Custom function not executed', - )), + prompt_config=PromptConfig( + templates=PromptTemplates( + validation_errors_retry='Custom retry message', + final_result_processed='Custom final result', + output_tool_not_executed='Custom output not executed:', + function_tool_not_executed='Custom function not executed', + ) + ), ) agent.tool_plain(my_function_tool) @@ -501,7 +505,9 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ) # Verify prompt_config 
can be overridden - with agent.override( + prompt_config=PromptConfig(templates=PromptTemplates(validation_errors_retry='Custom retry message override')) + ): result = agent.run_sync('Hello') assert result.output.model_dump() == {'a': 42, 'b': 'foo'} retry_request = result.all_messages()[2] From 12c3ad6f2e8111d828e7337eceb5e51154c61a32 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 11:32:04 +0530 Subject: [PATCH 70/98] Resolve merge conflicts --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 4 +- tests/test_agent.py | 319 +------------------ tests/test_streaming.py | 33 +- 3 files changed, 22 insertions(+), 334 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 33d28c4bce..e70d0b9d28 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -890,7 +890,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.final_result_processed, tool_call_id=call.tool_call_id, - return_kind='final-result-processed', + return_kind='final-result-processed', ) output_parts.append(part) # Early strategy is chosen and final result is already set @@ -900,7 +900,7 @@ async def process_tool_calls( # noqa: C901 tool_name=call.tool_name, content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.output_tool_not_executed, tool_call_id=call.tool_call_id, - return_kind='output-tool-not-executed', + return_kind='output-tool-not-executed', ) yield _messages.FunctionToolResultEvent(part) output_parts.append(part) diff --git a/tests/test_agent.py b/tests/test_agent.py index 4f54c13aea..4a17cf2219 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -237,304 +237,6 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse assert result.all_messages_json().startswith(b'[{"parts":[{"content":"Hello",') -def test_prompt_config_callable(): - """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" - - def my_function_tool() -> str: # pragma: no cover - return 'function executed' - - def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: - assert info.output_tools is not None - - if len(messages) == 1: - return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, '{"a": "wrong", "b": "foo"}')]) - - else: - assert info.function_tools is not None - return ModelResponse( - parts=[ - ToolCallPart(info.output_tools[0].name, '{"a": 42, "b": "foo"}'), # Succeeds - ToolCallPart(info.output_tools[0].name, '{"a": 99, "b": "bar"}'), # Not executed - ToolCallPart(info.function_tools[0].name, '{}'), # Not executed - ] - ) - - agent = Agent( - FunctionModel(return_model), - output_type=Foo, - prompt_config=PromptConfig( - templates=PromptTemplates( - validation_errors_retry=lambda part, ctx: 'Please fix these validation errors and try again.', - final_result_processed=lambda part, ctx: f'Custom final result {part.content}', - output_tool_not_executed=lambda part, ctx: f'Custom output not executed: {part.tool_name}', - function_tool_not_executed=lambda part, ctx: f'Custom function not executed: {part.tool_name}', - ) - ), - ) - - agent.tool_plain(my_function_tool) - - result = agent.run_sync('Hello') - assert result.output.model_dump() == {'a': 42,
'b': 'foo'} - - retry_request = result.all_messages()[2] - assert isinstance(retry_request, ModelRequest) - retry_part = retry_request.parts[0] - assert isinstance(retry_part, RetryPromptPart) - # model_response() returns validation errors + retry_message appended - assert retry_part.model_response() == snapshot("""\ -1 validation error: -```json -[ - { - "type": "int_parsing", - "loc": [ - "a" - ], - "msg": "Input should be a valid integer, unable to parse string as an integer", - "input": "wrong" - } -] -``` - -Please fix these validation errors and try again.\ -""") - - assert result.all_messages() == snapshot( - [ - ModelRequest( - parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], - run_id=IsStr(), - ), - ModelResponse( - parts=[ToolCallPart(tool_name='final_result', args='{"a": "wrong", "b": "foo"}', tool_call_id=IsStr())], - usage=RequestUsage(input_tokens=51, output_tokens=7), - model_name='function:return_model:', - timestamp=IsNow(tz=timezone.utc), - run_id=IsStr(), - ), - ModelRequest( - parts=[ - RetryPromptPart( - tool_name='final_result', - content=[ - ErrorDetails( - type='int_parsing', - loc=('a',), - msg='Input should be a valid integer, unable to parse string as an integer', - input='wrong', - ) - ], - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - retry_message='Please fix these validation errors and try again.', - ) - ], - run_id=IsStr(), - ), - ModelResponse( - parts=[ - ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr()), - ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), - ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), - ], - usage=RequestUsage(input_tokens=91, output_tokens=23), - model_name='function:return_model:', - timestamp=IsNow(tz=timezone.utc), - run_id=IsStr(), - ), - ModelRequest( - parts=[ - ToolReturnPart( - tool_name='final_result', - content='Custom final result Final result processed.', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='final-result-processed', - ), - ToolReturnPart( - tool_name='final_result', - content='Custom output not executed: final_result', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='output-tool-not-executed', - ), - ToolReturnPart( - tool_name='my_function_tool', - content='Custom function not executed: my_function_tool', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='function-tool-not-executed', - ), - ], - run_id=IsStr(), - ), - ] - ) - - -def test_prompt_config_string_and_override_prompt_config(): - """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" - - def my_function_tool() -> str: # pragma: no cover - return 'function executed' - - def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: - assert info.output_tools is not None - - if len(messages) == 1: - return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, '{"a": "wrong", "b": "foo"}')]) - - else: - assert info.function_tools is not None - return ModelResponse( - parts=[ - ToolCallPart(info.output_tools[0].name, '{"a": 42, "b": "foo"}'), # Succeeds - ToolCallPart(info.output_tools[0].name, '{"a": 99, "b": "bar"}'), # Not executed - ToolCallPart(info.function_tools[0].name, '{}'), # Not executed - ] - ) - - agent = Agent( - FunctionModel(return_model), - output_type=Foo, - prompt_config=PromptConfig( - 
templates=PromptTemplates( - validation_errors_retry='Custom retry message', - final_result_processed='Custom final result', - output_tool_not_executed='Custom output not executed:', - function_tool_not_executed='Custom function not executed', - ) - ), - ) - - agent.tool_plain(my_function_tool) - - result = agent.run_sync('Hello') - assert result.output.model_dump() == {'a': 42, 'b': 'foo'} - - retry_request = result.all_messages()[2] - assert isinstance(retry_request, ModelRequest) - retry_part = retry_request.parts[0] - assert isinstance(retry_part, RetryPromptPart) - # model_response() returns validation errors + retry_message appended - assert retry_part.model_response() == snapshot("""\ -1 validation error: -```json -[ - { - "type": "int_parsing", - "loc": [ - "a" - ], - "msg": "Input should be a valid integer, unable to parse string as an integer", - "input": "wrong" - } -] -``` - -Custom retry message""") - assert result.all_messages() == snapshot( - [ - ModelRequest( - parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], - run_id=IsStr(), - ), - ModelResponse( - parts=[ToolCallPart(tool_name='final_result', args='{"a": "wrong", "b": "foo"}', tool_call_id=IsStr())], - usage=RequestUsage(input_tokens=51, output_tokens=7), - model_name='function:return_model:', - timestamp=IsNow(tz=timezone.utc), - run_id=IsStr(), - ), - ModelRequest( - parts=[ - RetryPromptPart( - tool_name='final_result', - content=[ - { - 'type': 'int_parsing', - 'loc': ('a',), - 'msg': 'Input should be a valid integer, unable to parse string as an integer', - 'input': 'wrong', - } - ], - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - retry_message='Custom retry message', - ) - ], - run_id=IsStr(), - ), - ModelResponse( - parts=[ - ToolCallPart(tool_name='final_result', args='{"a": 42, "b": "foo"}', tool_call_id=IsStr()), - ToolCallPart(tool_name='final_result', args='{"a": 99, "b": "bar"}', tool_call_id=IsStr()), - ToolCallPart(tool_name='my_function_tool', args='{}', tool_call_id=IsStr()), - ], - usage=RequestUsage(input_tokens=85, output_tokens=23), - model_name='function:return_model:', - timestamp=IsNow(tz=timezone.utc), - run_id=IsStr(), - ), - ModelRequest( - parts=[ - ToolReturnPart( - tool_name='final_result', - content='Custom final result', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='final-result-processed', - ), - ToolReturnPart( - tool_name='final_result', - content='Custom output not executed:', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='output-tool-not-executed', - ), - ToolReturnPart( - tool_name='my_function_tool', - content='Custom function not executed', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='function-tool-not-executed', - ), - ], - run_id=IsStr(), - ), - ] - ) - - # Verify prompt_config can be overridden - with agent.override( - prompt_config=PromptConfig(templates=PromptTemplates(validation_errors_retry='Custom retry message override')) - ): - result = agent.run_sync('Hello') - assert result.output.model_dump() == {'a': 42, 'b': 'foo'} - retry_request = result.all_messages()[2] - assert isinstance(retry_request, ModelRequest) - retry_part = retry_request.parts[0] - assert isinstance(retry_part, RetryPromptPart) - # model_response() returns validation errors + retry_message appended - assert retry_part.model_response() == snapshot("""\ -1 validation error: -```json -[ - { - "type": "int_parsing", - "loc": [ - "a" - ], - "msg": "Input should be a valid integer, 
unable to parse string as an integer", - "input": "wrong" - } -] -``` - -Custom retry message override""") - - def test_prompt_config_callable(): """Test all prompt templates: validation_errors_retry, final_result_processed, output_tool_not_executed, and function_tool_not_executed.""" @@ -3653,21 +3355,21 @@ def deferred_tool(x: int) -> int: # pragma: no cover tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='final-result-processed', - ), + ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='function-tool-not-executed', - ), + ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='function-tool-not-executed', - ), + ), ToolReturnPart( tool_name='deferred_tool', content='Tool not executed - a final result was already processed.', @@ -3803,7 +3505,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='final-result-processed', - ), + ), ToolReturnPart( tool_name='final_result', content='Output tool not used - a final result was already processed.', @@ -4596,15 +4298,12 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: ), ModelRequest( parts=[ - timestamp=IsDatetime(), - return_kind='function-tool-not-executed', - ), ToolReturnPart( tool_name='first_output', content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - return_kind='function-tool-not-executed', + return_kind='final-result-processed', ), RetryPromptPart( content='Second output validation failed', @@ -4612,13 +4311,6 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), ), - ToolReturnPart( - tool_name='deferred_tool', - content='Tool not executed - a final result was already processed.', - tool_call_id=IsStr(), - timestamp=IsNow(tz=timezone.utc), - return_kind='function-tool-not-executed', - ), ], run_id=IsStr(), ), @@ -4710,7 +4402,6 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: ), ], run_id=IsStr(), - return_kind='final-result-processed', ), ] ) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index 250525041b..01b94c4787 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -846,21 +846,21 @@ def deferred_tool(x: int) -> int: # pragma: no cover timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), return_kind='final-result-processed', - ), + ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), return_kind='function-tool-not-executed', - ), + ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), return_kind='function-tool-not-executed', - ), + ), ToolReturnPart( tool_name='deferred_tool', content='Tool not executed - a final result was already processed.', @@ -941,14 +941,14 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='final-result-processed', - ), + ), ToolReturnPart( tool_name='second_output', content='Output tool not used - a 
final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), return_kind='output-tool-not-executed', - ), + ), ], run_id=IsStr(), ), @@ -1100,21 +1100,21 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), - return_kind='final-result-processed', + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), - return_kind='function-tool-not-executed', + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), - return_kind='function-tool-not-executed', + return_kind='function-tool-not-executed', ), RetryPromptPart( content="Unknown tool name: 'unknown_tool'. Available tools: 'final_result', 'regular_tool', 'another_tool', 'deferred_tool'", @@ -1227,14 +1227,14 @@ def regular_tool(x: int) -> int: # pragma: no cover tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), return_kind='output-tool-not-executed', - ), + ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), return_kind='function-tool-not-executed', - ), + ), ], run_id=IsStr(), ), @@ -1309,7 +1309,7 @@ def regular_tool(x: int) -> int: tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), return_kind='tool-executed', - ) + ) ], run_id=IsStr(), ), @@ -1365,13 +1365,10 @@ def regular_tool(x: int) -> int: parts=[ ToolReturnPart( tool_name='regular_tool', - - content=0, + content=0, tool_call_id=IsStr(), - - timestamp=IsNow(tz=datetime.timezone.utc), - , - return_kind='tool-executed', + timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -1477,7 +1474,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), - return_kind='final-result-processed', + return_kind='final-result-processed', ), ToolReturnPart( tool_name='final_result', From 4069ad8ef45acfbe8aca6a3a76eb70a1d37ed9b7 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 12:02:59 +0530 Subject: [PATCH 71/98] fixes --- .../pydantic_ai/agent/__init__.py | 6 ++-- .../pydantic_ai/toolsets/prepared.py | 5 ++- tests/test_agent.py | 33 +++++++++++++++++-- 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 2f8d3396b4..deafe9ed8b 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1452,11 +1452,13 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools # Check if the prompt_config has any tool descriptions to prepare tools - toolset = PreparedToolset(toolset, self._prepare_tools, tool_config=tool_config) + if self._prepare_tools or tool_config: + toolset = PreparedToolset(toolset, self._prepare_tools, tool_config=tool_config) output_toolset = output_toolset if _utils.is_set(output_toolset) else self._output_toolset if output_toolset is not None: - output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools, tool_config=tool_config) 
+ if self._prepare_output_tools or tool_config: + output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools, tool_config=tool_config) toolset = CombinedToolset([output_toolset, toolset]) return toolset diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py index 90f50adde3..5513844913 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py @@ -19,7 +19,7 @@ class PreparedToolset(WrapperToolset[AgentDepsT]): """ prepare_func: ToolsPrepareFunc[AgentDepsT] | None - tool_config: ToolConfig + tool_config: ToolConfig | None = None async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: original_tools = await super().get_tools(ctx) @@ -33,6 +33,9 @@ async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[ async def _get_tools_from_tool_config( self, original_tools: dict[str, ToolsetTool[AgentDepsT]] ) -> dict[str, ToolsetTool[AgentDepsT]]: + if self.tool_config is None: + return original_tools + tool_descriptions = self.tool_config.tool_descriptions for tool_name, description in tool_descriptions.items(): diff --git a/tests/test_agent.py b/tests/test_agent.py index 4a17cf2219..4022aac578 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -68,7 +68,7 @@ from pydantic_ai.models.function import AgentInfo, DeltaToolCall, DeltaToolCalls, FunctionModel from pydantic_ai.models.test import TestModel from pydantic_ai.output import OutputObjectDefinition, StructuredDict, ToolOutput -from pydantic_ai.prompt_config import PromptConfig, PromptTemplates +from pydantic_ai.prompt_config import PromptConfig, PromptTemplates, ToolConfig from pydantic_ai.result import RunUsage from pydantic_ai.settings import ModelSettings from pydantic_ai.tools import DeferredToolRequests, DeferredToolResults, ToolDefinition, ToolDenied @@ -535,6 +535,32 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse Custom retry message override""") +def test_prompt_config_tool_config_descriptions(): + """Test that ToolConfig.tool_descriptions updates tool descriptions at the agent level.""" + + def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + # Verify the tool description was updated + assert info.function_tools is not None + my_tool = next(t for t in info.function_tools if t.name == 'my_tool') + assert my_tool.description == 'Custom tool description from ToolConfig' + return ModelResponse(parts=[TextPart('Done')]) + + agent = Agent( + FunctionModel(return_model), + prompt_config=PromptConfig( + tool_config=ToolConfig(tool_descriptions={'my_tool': 'Custom tool description from ToolConfig'}) + ), + ) + + @agent.tool_plain + def my_tool(x: int) -> int: + """Original description that should be overridden""" + return x * 2 + + result = agent.run_sync('Hello') + assert result.output == 'Done' + + def test_result_pydantic_model_validation_error(): def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: assert info.output_tools is not None @@ -3447,16 +3473,17 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='second_output', content='Output tool not used - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + 
return_kind='output-tool-not-executed', ), ], run_id=IsStr(), - return_kind='function-tool-not-executed', ), ] ) @@ -3873,10 +3900,10 @@ def regular_tool(x: int) -> int: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), - return_kind='output-tool-not-executed', ), ] ) From 378d0e6ad874fe5c33f81660c78641d48e6313d5 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 12:34:46 +0530 Subject: [PATCH 72/98] test fixes --- tests/test_agent.py | 17 ++++++++++++++++- tests/test_streaming.py | 18 +++++++++++++++++- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/tests/test_agent.py b/tests/test_agent.py index 4022aac578..32b616a231 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -3401,6 +3401,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -3538,6 +3539,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Output tool not used - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='output-tool-not-executed', ), ], run_id=IsStr(), @@ -3622,18 +3624,21 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsDatetime(), + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='another_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), RetryPromptPart( content="Unknown tool name: 'unknown_tool'. 
Available tools: 'final_result', 'regular_tool', 'another_tool', 'deferred_tool'", @@ -3646,6 +3651,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -3733,18 +3739,21 @@ def regular_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ToolReturnPart( tool_name='external_tool', content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -3821,6 +3830,7 @@ def regular_tool(x: int) -> int: content=1, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -3876,6 +3886,7 @@ def regular_tool(x: int) -> int: content=0, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ) ], run_id=IsStr(), @@ -3993,7 +4004,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - return_kind='output-tool-not-executed', + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', @@ -4093,12 +4104,14 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='second_output', content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ], run_id=IsStr(), @@ -4177,6 +4190,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ], run_id=IsStr(), @@ -4426,6 +4440,7 @@ def return_model(_: list[ModelMessage], info: AgentInfo) -> ModelResponse: content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id='second', + return_kind='final-result-processed', ), ], run_id=IsStr(), diff --git a/tests/test_streaming.py b/tests/test_streaming.py index 01b94c4787..b6f944ea9e 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -866,6 +866,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -994,12 +995,14 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='final_result', content='Output tool not used - a final result was already processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='output-tool-not-executed', ), ], run_id=IsStr(), @@ -1127,6 +1130,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover 
content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -1394,6 +1398,7 @@ def regular_tool(x: int) -> int: content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='final-result-processed', ) ], run_id=IsStr(), @@ -1481,15 +1486,21 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Final result processed.', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='regular_tool', content=42, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), ToolReturnPart( - tool_name='another_tool', content=2, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) + tool_name='another_tool', + content=2, + tool_call_id=IsStr(), + timestamp=IsNow(tz=timezone.utc), + return_kind='tool-executed', ), RetryPromptPart( content="Unknown tool name: 'unknown_tool'. Available tools: 'final_result', 'regular_tool', 'another_tool', 'deferred_tool'", @@ -1502,6 +1513,7 @@ def deferred_tool(x: int) -> int: # pragma: no cover content='Tool not executed - a final result was already processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='function-tool-not-executed', ), ], run_id=IsStr(), @@ -1576,12 +1588,14 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='second_output', content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ], run_id=IsStr(), @@ -1737,6 +1751,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), + return_kind='final-result-processed', ), ToolReturnPart( tool_name='second_output', @@ -1821,6 +1836,7 @@ async def stream_function(_: list[ModelMessage], info: AgentInfo) -> AsyncIterat content='Final result processed.', tool_call_id=IsStr(), timestamp=IsNow(tz=datetime.timezone.utc), + return_kind='final-result-processed', ), RetryPromptPart( content='Second output validation failed', From 2c1fe89c02a0004cfdad3a17d1f2fa4ba06735e0 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 13:01:12 +0530 Subject: [PATCH 73/98] docs --- docs/agents.md | 118 +++++++++++++++++++++++ docs/api/prompt_config.md | 10 ++ mkdocs.yml | 1 + pydantic_ai_slim/pydantic_ai/__init__.py | 3 +- 4 files changed, 131 insertions(+), 1 deletion(-) create mode 100644 docs/api/prompt_config.md diff --git a/docs/agents.md b/docs/agents.md index 5cedc7d1a1..1bb02e7f59 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -15,6 +15,7 @@ The [`Agent`][pydantic_ai.Agent] class has full API documentation, but conceptua | [Dependency type constraint](dependencies.md) | Dynamic instructions functions, tools, and output functions may all use dependencies when they're run. | | [LLM model](api/models/base.md) | Optional default LLM model associated with the agent. Can also be specified when running the agent. | | [Model Settings](#additional-configuration) | Optional default model settings to help fine tune requests. Can also be specified when running the agent. 
| +| [Prompt Configuration](#prompt-configuration) | Optional configuration for customizing system-generated messages, tool descriptions, and retry prompts. | In typing terms, agents are generic in their dependency and output types, e.g., an agent which required dependencies of type `#!python Foobar` and produced outputs of type `#!python list[str]` would have type `Agent[Foobar, list[str]]`. In practice, you shouldn't need to care about this, it should just mean your IDE can tell you when you have the right type, and if you choose to use [static type checking](#static-type-checking) it should work well with Pydantic AI. @@ -751,6 +752,123 @@ except UnexpectedModelBehavior as e: 1. This error is raised because the safety thresholds were exceeded. +### Prompt Configuration + +Pydantic AI provides [`PromptConfig`][pydantic_ai.PromptConfig] to customize the system-generated messages +that are sent to models during agent runs. This includes retry prompts, tool return confirmations, +validation error messages, and tool descriptions. + +#### Customizing System Messages with PromptTemplates + +[`PromptTemplates`][pydantic_ai.PromptTemplates] allows you to override the default messages that Pydantic AI +sends to the model for retries, tool results, and other system-generated content. + +```python {title="prompt_templates_example.py"} +from pydantic_ai import Agent, PromptConfig, PromptTemplates + +# Using static strings +agent = Agent( + 'openai:gpt-5', + prompt_config=PromptConfig( + templates=PromptTemplates( + validation_errors_retry='Please correct the validation errors and try again.', + final_result_processed='Result received successfully.', + ), + ), +) +``` + +You can also use callable functions for dynamic messages that have access to the message part +and the [`RunContext`][pydantic_ai.RunContext]: + +```python {title="prompt_templates_dynamic.py"} +from pydantic_ai import Agent, PromptConfig, PromptTemplates, RunContext +from pydantic_ai.messages import RetryPromptPart + +def custom_retry_message(part: RetryPromptPart, ctx: RunContext) -> str: + return f'Attempt #{ctx.retries + 1}: Please fix the errors and try again.' + +agent = Agent( + 'openai:gpt-5', + prompt_config=PromptConfig( + templates=PromptTemplates( + validation_errors_retry=custom_retry_message, + ), + ), +) +``` + +The available template fields in [`PromptTemplates`][pydantic_ai.PromptTemplates] include: + +| Template Field | Description | +|----------------|-------------| +| `final_result_processed` | Confirmation message when a final result is successfully processed | +| `output_tool_not_executed` | Message when an output tool call is skipped because a result was already found | +| `function_tool_not_executed` | Message when a function tool call is skipped because a result was already found | +| `tool_call_denied` | Message when a tool call is denied by an approval handler | +| `validation_errors_retry` | Message appended to validation errors when asking the model to retry | +| `model_retry_string_tool` | Message when a `ModelRetry` exception is raised from a tool | +| `model_retry_string_no_tool` | Message when a `ModelRetry` exception is raised outside of a tool context | + +#### Customizing Tool Descriptions with ToolConfig + +[`ToolConfig`][pydantic_ai.ToolConfig] allows you to override tool descriptions at runtime without modifying +the original tool definitions. This is useful when you want to provide different descriptions for the same +tool in different contexts or agent runs. 
+ +```python {title="tool_config_example.py"} +from pydantic_ai import Agent, PromptConfig, ToolConfig + +agent = Agent( + 'openai:gpt-5', + prompt_config=PromptConfig( + tool_config=ToolConfig( + tool_descriptions={ + 'search_database': 'Search the customer database for user records by name or email.', + 'send_notification': 'Send an urgent notification to the user via their preferred channel.', + } + ), + ), +) + + +@agent.tool_plain +def search_database(query: str) -> list[str]: + """Original description that will be overridden.""" + return ['result1', 'result2'] + + +@agent.tool_plain +def send_notification(user_id: str, message: str) -> bool: + """Original description that will be overridden.""" + return True +``` + +You can also override `prompt_config` at runtime using the `prompt_config` parameter in the run methods, +or temporarily using [`agent.override()`][pydantic_ai.Agent.override]: + +```python {title="prompt_config_override.py"} +from pydantic_ai import Agent, PromptConfig, PromptTemplates + +agent = Agent('openai:gpt-5') + +# Override at runtime +result = agent.run_sync( + 'Hello', + prompt_config=PromptConfig( + templates=PromptTemplates(validation_errors_retry='Custom retry message for this run.') + ), +) + +# Or use agent.override() context manager +with agent.override( + prompt_config=PromptConfig( + templates=PromptTemplates(validation_errors_retry='Another custom message.') + ) +): + result = agent.run_sync('Hello') +``` + ## Runs vs. Conversations An agent **run** might represent an entire conversation — there's no limit to how many messages can be exchanged in a single run. However, a **conversation** might also be composed of multiple runs, especially if you need to maintain state between separate interactions or API calls. 
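The retry-related templates take the same `(part, ctx)` callable form. The sketch below (hypothetical file title) assumes `model_retry_string_tool` is called with the [`RetryPromptPart`][pydantic_ai.messages.RetryPromptPart] produced when a tool raises [`ModelRetry`][pydantic_ai.ModelRetry], mirroring the `validation_errors_retry` callable shown earlier:

```python {title="prompt_templates_model_retry.py"}
from pydantic_ai import Agent, PromptConfig, PromptTemplates
from pydantic_ai.messages import RetryPromptPart
from pydantic_ai.tools import RunContext


def tool_retry_message(part: RetryPromptPart, ctx: RunContext) -> str:
    # part.tool_name identifies the tool that raised ModelRetry (assumed available here)
    return f'The {part.tool_name!r} tool requested a retry; adjust the arguments and call it again.'


agent = Agent(
    'openai:gpt-5',
    prompt_config=PromptConfig(
        templates=PromptTemplates(model_retry_string_tool=tool_retry_message),
    ),
)
```

As elsewhere in `PromptTemplates`, a plain string can be used instead of a callable when no run context is needed.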
diff --git a/docs/api/prompt_config.md b/docs/api/prompt_config.md new file mode 100644 index 0000000000..906cf2f05e --- /dev/null +++ b/docs/api/prompt_config.md @@ -0,0 +1,10 @@ +# `pydantic_ai.prompt_config` + +::: pydantic_ai.prompt_config + options: + inherited_members: true + members: + - PromptConfig + - PromptTemplates + - ToolConfig + diff --git a/mkdocs.yml b/mkdocs.yml index 1b46d5250c..fa9bb34840 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -163,6 +163,7 @@ nav: - api/models/test.md - api/models/wrapper.md - api/profiles.md + - api/prompt_config.md - api/providers.md - api/retries.md - api/run.md diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py index 344128ec37..97191a7ac8 100644 --- a/pydantic_ai_slim/pydantic_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/__init__.py @@ -94,7 +94,7 @@ ModelProfile, ModelProfileSpec, ) -from .prompt_config import PromptConfig, PromptTemplates +from .prompt_config import PromptConfig, PromptTemplates, ToolConfig from .run import AgentRun, AgentRunResult, AgentRunResultEvent from .settings import ModelSettings from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied @@ -233,6 +233,7 @@ # prompt_config 'PromptConfig', 'PromptTemplates', + 'ToolConfig', # format_prompt 'format_as_xml', # settings From 2c74c5aac00c1e7bef782c6cafdd6d5951521848 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 13:14:55 +0530 Subject: [PATCH 74/98] fixing not passing prompt_config via iter --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 4 +--- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 8 +++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index e70d0b9d28..ad417d575a 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -797,12 +797,10 @@ def _handle_final_result( ) -> End[result.FinalResult[NodeRunEndT]]: messages = ctx.state.message_history + # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: run_ctx = build_run_context(ctx) tool_responses = [ctx.deps.prompt_config.templates.apply_template(part, run_ctx) for part in tool_responses] - - # For backwards compatibility, append a new ModelRequest using the tool returns and retries - if tool_responses: messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id)) return End(final_result) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index deafe9ed8b..6f89bdc543 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -594,6 +594,7 @@ async def main(): # may change the result type from the restricted type to something else. Therefore, we consider the following # typecast reasonable, even though it is possible to violate it with otherwise-type-checked code. 
output_validators = self._output_validators + prompt_config = self._get_prompt_config(prompt_config) output_toolset = self._output_toolset if output_schema != self._output_schema or output_validators: @@ -601,7 +602,7 @@ async def main(): if output_toolset: output_toolset.max_retries = self._max_result_retries output_toolset.output_validators = output_validators - toolset = self._get_toolset(output_toolset=output_toolset, additional_toolsets=toolsets) + toolset = self._get_toolset(output_toolset=output_toolset, additional_toolsets=toolsets, prompt_config=prompt_config) tool_manager = ToolManager[AgentDepsT](toolset) # Build the graph @@ -620,7 +621,6 @@ async def main(): merged_settings = merge_model_settings(model_used.settings, self.model_settings) model_settings = merge_model_settings(merged_settings, model_settings) usage_limits = usage_limits or _usage.UsageLimits() - prompt_config = self._get_prompt_config(prompt_config) instructions_literal, instructions_functions = self._get_instructions(additional_instructions=instructions) @@ -1426,12 +1426,14 @@ def _get_toolset( self, output_toolset: AbstractToolset[AgentDepsT] | None | _utils.Unset = _utils.UNSET, additional_toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + prompt_config: _prompt_config.PromptConfig | None = None, ) -> AbstractToolset[AgentDepsT]: """Get the complete toolset. Args: output_toolset: The output toolset to use instead of the one built at agent construction time. additional_toolsets: Additional toolsets to add, unless toolsets have been overridden. + prompt_config: The prompt config to use for tool descriptions. If None, uses agent-level or default. """ toolsets = self.toolsets # Don't add additional toolsets if the toolsets have been overridden @@ -1448,7 +1450,7 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools return toolset toolset = toolset.visit_and_replace(copy_dynamic_toolsets) - tool_config = self._get_prompt_config(None).tool_config + tool_config = self._get_prompt_config(prompt_config).tool_config # Check if the prompt_config has any tool descriptions to prepare tools From c36ee12cb7e4724b298894aab3b721c5817329d0 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 13:44:25 +0530 Subject: [PATCH 75/98] better test for tool config overriding check --- tests/test_agent.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/tests/test_agent.py b/tests/test_agent.py index 32b616a231..84b6646f90 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -561,6 +561,45 @@ def my_tool(x: int) -> int: assert result.output == 'Done' +def test_prompt_config_tool_config_descriptions_at_runtime(): + """Test that ToolConfig.tool_descriptions passed to run_sync() overrides agent-level prompt_config.""" + observed_descriptions: list[str | None] = [] + + def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + assert info.function_tools is not None + basic_tool = next(tool for tool in info.function_tools if tool.name == 'basic_tool') + observed_descriptions.append(basic_tool.description) + return ModelResponse(parts=[TextPart('Done')]) + + # Agent with agent-level prompt_config + agent = Agent( + FunctionModel(return_model), + prompt_config=PromptConfig( + tool_config=ToolConfig(tool_descriptions={'basic_tool': 'Agent-level tool description'}) + ), + ) + + @agent.tool_plain + def basic_tool(x: int) -> int: + """Original description that should be overridden""" + return x * 2 + + # First run: 
no runtime prompt_config, should use agent-level description
+    result = agent.run_sync('Hello')
+    assert result.output == 'Done'
+    assert observed_descriptions[-1] == 'Agent-level tool description'
+
+    # Second run: pass runtime prompt_config, should override agent-level description
+    result = agent.run_sync(
+        'Hello',
+        prompt_config=PromptConfig(
+            tool_config=ToolConfig(tool_descriptions={'basic_tool': 'Runtime custom tool description'})
+        ),
+    )
+    assert result.output == 'Done'
+    assert observed_descriptions[-1] == 'Runtime custom tool description'
+
+
 def test_result_pydantic_model_validation_error():
     def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         assert info.output_tools is not None

From 2bde52aa1cd53197704b73e04ac9a38ef79fd02d Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Sat, 13 Dec 2025 13:44:40 +0530
Subject: [PATCH 76/98] lint cleanup

---
 pydantic_ai_slim/pydantic_ai/agent/__init__.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
index 6f89bdc543..1f2d5d9bd8 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
@@ -602,7 +602,9 @@ async def main():
         if output_toolset:
             output_toolset.max_retries = self._max_result_retries
             output_toolset.output_validators = output_validators
-        toolset = self._get_toolset(output_toolset=output_toolset, additional_toolsets=toolsets, prompt_config=prompt_config)
+        toolset = self._get_toolset(
+            output_toolset=output_toolset, additional_toolsets=toolsets, prompt_config=prompt_config
+        )
         tool_manager = ToolManager[AgentDepsT](toolset)
 
         # Build the graph

From 18b2fa821f829a357c8decd30fb9e7d694cdf685 Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Sat, 13 Dec 2025 13:56:01 +0530
Subject: [PATCH 77/98] fixing order

---
 pydantic_ai_slim/pydantic_ai/toolsets/prepared.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py
index 5513844913..31951cf524 100644
--- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py
+++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py
@@ -24,11 +24,12 @@ class PreparedToolset(WrapperToolset[AgentDepsT]):
     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
         original_tools = await super().get_tools(ctx)
 
-        tools_after_prepare_func = await self.get_tools_from_prepare_func(original_tools, ctx)
+        tools_after_tool_config = await self._get_tools_from_tool_config(original_tools)
+        # If no tool config is set, this returns the original tools unchanged, which are then passed to the prepare function
+        tools_after_prepare_func = await self.get_tools_from_prepare_func(tools_after_tool_config, ctx)
+        # Likewise, if no prepare function is set, this returns its input unchanged (the original tools when neither is set)
 
-        tools_after_tool_config = await self._get_tools_from_tool_config(tools_after_prepare_func)
-
-        return tools_after_tool_config
+        return tools_after_prepare_func
 
     async def _get_tools_from_tool_config(
         self, original_tools: dict[str, ToolsetTool[AgentDepsT]]

From ca0c29c24d9832965ff7355fa520866a452e96aa Mon Sep 17 00:00:00 2001
From: adtyavrdhn
Date: Sat, 13 Dec 2025 14:11:01 +0530
Subject: [PATCH 78/98] fixing doc

---
 docs/api/prompt_config.md | 1 -
 1 file changed, 1 deletion(-)

diff --git 
a/docs/api/prompt_config.md b/docs/api/prompt_config.md index 906cf2f05e..1b677292a7 100644 --- a/docs/api/prompt_config.md +++ b/docs/api/prompt_config.md @@ -7,4 +7,3 @@ - PromptConfig - PromptTemplates - ToolConfig - From e5285f04cc87a5768796fd1547296b2842943ec0 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 14:24:59 +0530 Subject: [PATCH 79/98] fixing doc --- docs/agents.md | 4 +++- tests/test_examples.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/agents.md b/docs/agents.md index 1bb02e7f59..d70f7b76e9 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -782,8 +782,10 @@ You can also use callable functions for dynamic messages that have access to the and the [`RunContext`][pydantic_ai.RunContext]: ```python {title="prompt_templates_dynamic.py"} -from pydantic_ai import Agent, PromptConfig, PromptTemplates, RunContext +from pydantic_ai import Agent, PromptConfig, PromptTemplates from pydantic_ai.messages import RetryPromptPart +from pydantic_ai.tools import RunContext + def custom_retry_message(part: RetryPromptPart, ctx: RunContext) -> str: return f'Attempt #{ctx.retries + 1}: Please fix the errors and try again.' diff --git a/tests/test_examples.py b/tests/test_examples.py index 8ed0828250..b60f3d0ee2 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -324,6 +324,7 @@ async def call_tool( text_responses: dict[str, str | ToolCallPart | Sequence[ToolCallPart]] = { + 'Hello': 'Hello! How can I help you today?', 'Use the web to get the current time.': "In San Francisco, it's 8:21:41 pm PDT on Wednesday, August 6, 2025.", 'Give me a sentence with the biggest news in AI this week.': 'Scientists have developed a universal AI detector that can identify deepfake videos.', 'How many days between 2000-01-01 and 2025-03-18?': 'There are 9,208 days between January 1, 2000, and March 18, 2025.', From ecf13ce7ba384e70d70e1b560af7dc91baf99303 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 21:48:16 +0530 Subject: [PATCH 80/98] changes for coverage --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 5 ++--- pydantic_ai_slim/pydantic_ai/prompt_config.py | 4 ++++ tests/test_agent.py | 11 ++++++++--- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 1f2d5d9bd8..8b10f36242 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1453,10 +1453,9 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools toolset = toolset.visit_and_replace(copy_dynamic_toolsets) tool_config = self._get_prompt_config(prompt_config).tool_config + has_tool_desccriptions = tool_config.has_tool_descriptions() - # Check if the prompt_config has any tool descriptions to prepare tools - - if self._prepare_tools or tool_config: + if self._prepare_tools or has_tool_desccriptions: toolset = PreparedToolset(toolset, self._prepare_tools, tool_config=tool_config) output_toolset = output_toolset if _utils.is_set(output_toolset) else self._output_toolset diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 6f8b62072b..6cf03f5a47 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -164,6 +164,10 @@ class ToolConfig: tool_descriptions: dict[str, str] = field(default_factory=lambda: {}) """Custom descriptions for tools used by the agent.""" + def 
has_tool_descriptions(self) -> bool: + """Check if any tool descriptions are set(not an empty dict).""" + return bool(len(self.tool_descriptions) > 0) + @dataclass class PromptConfig: diff --git a/tests/test_agent.py b/tests/test_agent.py index 84b6646f90..49a917911b 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -553,7 +553,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse ) @agent.tool_plain - def my_tool(x: int) -> int: + def my_tool(x: int) -> int: # pragma: no cover """Original description that should be overridden""" return x * 2 @@ -575,12 +575,17 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse agent = Agent( FunctionModel(return_model), prompt_config=PromptConfig( - tool_config=ToolConfig(tool_descriptions={'basic_tool': 'Agent-level tool description'}) + tool_config=ToolConfig( + tool_descriptions={ + 'basic_tool': 'Agent-level tool description', + 'not_present_basic_tool': 'Should not be used', + } + ) ), ) @agent.tool_plain - def basic_tool(x: int) -> int: + def basic_tool(x: int) -> int: # pragma: no cover """Original description that should be overridden""" return x * 2 From 60c1f89600d0b2313b855bcb6a2310951eaf7750 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sat, 13 Dec 2025 22:59:09 +0530 Subject: [PATCH 81/98] changes for coverage --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 8b10f36242..8a18205a9b 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1460,7 +1460,7 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools output_toolset = output_toolset if _utils.is_set(output_toolset) else self._output_toolset if output_toolset is not None: - if self._prepare_output_tools or tool_config: + if self._prepare_output_tools or has_tool_desccriptions: output_toolset = PreparedToolset(output_toolset, self._prepare_output_tools, tool_config=tool_config) toolset = CombinedToolset([output_toolset, toolset]) From b0aa8374b73140c0fc43256f98edd3aee924b393 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Sun, 14 Dec 2025 09:20:24 +0530 Subject: [PATCH 82/98] toolconfig could be none --- pydantic_ai_slim/pydantic_ai/agent/__init__.py | 5 ++--- pydantic_ai_slim/pydantic_ai/prompt_config.py | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 8a18205a9b..c0b9624096 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1453,14 +1453,13 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools toolset = toolset.visit_and_replace(copy_dynamic_toolsets) tool_config = self._get_prompt_config(prompt_config).tool_config - has_tool_desccriptions = tool_config.has_tool_descriptions() - if self._prepare_tools or has_tool_desccriptions: + if self._prepare_tools or tool_config: toolset = PreparedToolset(toolset, self._prepare_tools, tool_config=tool_config) output_toolset = output_toolset if _utils.is_set(output_toolset) else self._output_toolset if output_toolset is not None: - if self._prepare_output_tools or has_tool_desccriptions: + if self._prepare_output_tools or tool_config: output_toolset = PreparedToolset(output_toolset, 
self._prepare_output_tools, tool_config=tool_config) toolset = CombinedToolset([output_toolset, toolset]) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 6cf03f5a47..b62e899971 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -164,10 +164,6 @@ class ToolConfig: tool_descriptions: dict[str, str] = field(default_factory=lambda: {}) """Custom descriptions for tools used by the agent.""" - def has_tool_descriptions(self) -> bool: - """Check if any tool descriptions are set(not an empty dict).""" - return bool(len(self.tool_descriptions) > 0) - @dataclass class PromptConfig: @@ -210,12 +206,24 @@ class PromptConfig: See [`PromptTemplates`][pydantic_ai.PromptTemplates] for available template options. """ - tool_config: ToolConfig = field(default_factory=ToolConfig) + tool_config: ToolConfig | None = None """Configuration for customizing tool descriptions and metadata. See [`ToolConfig`][pydantic_ai.ToolConfig] for available configuration options. """ +# @dataclass +# class InstructionsConfig: +# """ +# Configuration options to override instructions sent to the model. +# """ + + # instructions: + # It seems like runtime instuctions being passed to one of the run methods almost do the same thing. + # Why do we need this then? + + + DEFAULT_PROMPT_CONFIG = PromptConfig() """The default prompt configuration used when no custom configuration is provided. From aeef8badb23a539f17c587de6970f7eac82331cf Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Mon, 15 Dec 2025 19:40:13 +0530 Subject: [PATCH 83/98] Adding ToolConfig for tool_args description change in runtime --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 39 +++++++++++------ .../pydantic_ai/toolsets/prepared.py | 23 +++++++++- tests/test_tools.py | 42 +++++++++++++++++++ 3 files changed, 90 insertions(+), 14 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index b62e899971..9fc65ca232 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -164,6 +164,33 @@ class ToolConfig: tool_descriptions: dict[str, str] = field(default_factory=lambda: {}) """Custom descriptions for tools used by the agent.""" + tool_args_descriptions: dict[str, dict[str, str]] = field(default_factory=lambda: {}) + """Custom descriptions for tool arguments for the tools used by the agent. + Structured as a nested dictionary: + { + 'tool_name': { + 'arg_name': 'arg_description', + ... + }, + ... + } + """ + + def get_tool_args_for_tool(self, tool_name: str) -> dict[str, str] | None: + """Get the tool argument descriptions for the given tool name.""" + return self.tool_args_descriptions.get(tool_name) + + def get_description_for_tool(self, tool_name: str) -> str | None: + """Get the tool description for the given tool name.""" + return self.tool_descriptions.get(tool_name) + + def get_tool_arg_description(self, tool_name: str, arg_name: str) -> str | None: + """Get the tool argument description for the given tool name and argument name.""" + tool_args = self.get_tool_args_for_tool(tool_name) + if tool_args is None: + return None + return tool_args.get(arg_name) + @dataclass class PromptConfig: @@ -212,18 +239,6 @@ class PromptConfig: """ -# @dataclass -# class InstructionsConfig: -# """ -# Configuration options to override instructions sent to the model. 
-# """ - - # instructions: - # It seems like runtime instuctions being passed to one of the run methods almost do the same thing. - # Why do we need this then? - - - DEFAULT_PROMPT_CONFIG = PromptConfig() """The default prompt configuration used when no custom configuration is provided. diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py index 31951cf524..93d05461db 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py @@ -21,7 +21,9 @@ class PreparedToolset(WrapperToolset[AgentDepsT]): prepare_func: ToolsPrepareFunc[AgentDepsT] | None tool_config: ToolConfig | None = None - async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: + async def get_tools( + self, ctx: RunContext[AgentDepsT], tool_config: ToolConfig | None = None + ) -> dict[str, ToolsetTool[AgentDepsT]]: original_tools = await super().get_tools(ctx) tools_after_tool_config = await self._get_tools_from_tool_config(original_tools) @@ -32,7 +34,8 @@ async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[ return tools_after_prepare_func async def _get_tools_from_tool_config( - self, original_tools: dict[str, ToolsetTool[AgentDepsT]] + self, + original_tools: dict[str, ToolsetTool[AgentDepsT]], ) -> dict[str, ToolsetTool[AgentDepsT]]: if self.tool_config is None: return original_tools @@ -45,6 +48,22 @@ async def _get_tools_from_tool_config( updated_tool_def = replace(original_tool.tool_def, description=description) original_tools[tool_name] = replace(original_tool, tool_def=updated_tool_def) + for tool_name in list(original_tools.keys()): + tool_args = self.tool_config.get_tool_args_for_tool(tool_name) + if not tool_args: + continue + + original_tool = original_tools[tool_name].tool_def + parameter_defs = original_tool.parameters_json_schema + if not parameter_defs: + continue + + for tool_name, tool_arg in parameter_defs.get('properties', {}).items(): + tool_arg_description = tool_args.get(tool_name) + if not tool_arg_description: + continue + tool_arg['description'] = tool_arg_description + return original_tools async def get_tools_from_prepare_func( diff --git a/tests/test_tools.py b/tests/test_tools.py index fe58040b2a..433eb953f6 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -30,6 +30,7 @@ ToolReturnPart, UserError, UserPromptPart, + prompt_config, ) from pydantic_ai.exceptions import ApprovalRequired, CallDeferred, ModelRetry, UnexpectedModelBehavior from pydantic_ai.models.function import AgentInfo, FunctionModel @@ -155,6 +156,47 @@ def test_docstring_google(docstring_format: Literal['google', 'auto']): ) +@pytest.mark.parametrize('docstring_format', ['google', 'auto']) +def test_docstring_google_prompt_config(docstring_format: Literal['google', 'auto']): + agent = Agent(FunctionModel(get_json_schema)) + agent.tool_plain(docstring_format=docstring_format)(google_style_docstring) + p_config = prompt_config.PromptConfig( + tool_config=prompt_config.ToolConfig( + tool_args_descriptions={ + 'google_style_docstring': { + 'foo': 'The foo thing from tool config.', + 'bar': 'The bar thing from tool config.', + } + } + ) + ) + + result = agent.run_sync('Hello', prompt_config=p_config) + json_schema = json.loads(result.output) + + assert json_schema == snapshot( + { + 'name': 'google_style_docstring', + 'description': 'Do foobar stuff, a lot.', + 'parameters_json_schema': { + 'properties': { + 'foo': {'description': 'The foo 
thing from tool config.', 'type': 'integer'}, + 'bar': {'description': 'The bar thing from tool config.', 'type': 'string'}, + }, + 'required': ['foo', 'bar'], + 'type': 'object', + 'additionalProperties': False, + }, + 'outer_typed_dict_key': None, + 'strict': None, + 'kind': 'function', + 'sequential': False, + 'metadata': None, + 'timeout': None, + } + ) + + def sphinx_style_docstring(foo: int, /) -> str: # pragma: no cover """Sphinx style docstring. From d9a8ff9622670a24177bdb76478c650d5dc91ea2 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Mon, 15 Dec 2025 19:48:41 +0530 Subject: [PATCH 84/98] updating docstring --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 45 ++++++++++++++----- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 9fc65ca232..db59d6ffe7 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -159,22 +159,43 @@ def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str @dataclass class ToolConfig: - """Configuration for customizing tool descriptions, arguments used by agents.""" + """Configuration for customizing tool descriptions and argument descriptions at runtime. + + This allows you to override tool metadata without modifying the original tool definitions. + + Example: + ```python + from pydantic_ai import Agent, PromptConfig, ToolConfig + + agent = Agent('openai:gpt-4o') + + @agent.tool_plain + def search(query: str, limit: int) -> list[str]: + '''Search for items.''' + return [] + + result = agent.run_sync( + 'Find products', + prompt_config=PromptConfig( + tool_config=ToolConfig( + tool_descriptions={'search': 'Search product catalog by name or SKU.'}, + tool_args_descriptions={ + 'search': { + 'query': 'Product name or SKU code.', + 'limit': 'Maximum results to return (1-100).', + } + }, + ) + ), + ) + ``` + """ tool_descriptions: dict[str, str] = field(default_factory=lambda: {}) - """Custom descriptions for tools used by the agent.""" + """Custom descriptions for tools, keyed by tool name.""" tool_args_descriptions: dict[str, dict[str, str]] = field(default_factory=lambda: {}) - """Custom descriptions for tool arguments for the tools used by the agent. - Structured as a nested dictionary: - { - 'tool_name': { - 'arg_name': 'arg_description', - ... - }, - ... 
- } - """ + """Custom descriptions for tool arguments: `{'tool_name': {'arg_name': 'description'}}`.""" def get_tool_args_for_tool(self, tool_name: str) -> dict[str, str] | None: """Get the tool argument descriptions for the given tool name.""" From bdd2ea1d6a8e472daac1eda6504a73a42616dd0d Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Mon, 15 Dec 2025 20:06:25 +0530 Subject: [PATCH 85/98] docstring example --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index db59d6ffe7..e13b83e549 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -171,7 +171,7 @@ class ToolConfig: @agent.tool_plain def search(query: str, limit: int) -> list[str]: - '''Search for items.''' + """Search for items.""" return [] result = agent.run_sync( From 65c5f149ff20f68248eaf058501b0c8e1260c4c1 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Mon, 15 Dec 2025 20:18:29 +0530 Subject: [PATCH 86/98] docstring example --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index e13b83e549..d74ab0d2c4 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -169,11 +169,11 @@ class ToolConfig: agent = Agent('openai:gpt-4o') - @agent.tool_plain + @agent.tool_plain(description='Search for items.') def search(query: str, limit: int) -> list[str]: - """Search for items.""" return [] + # Override the description and arg descriptions at runtime result = agent.run_sync( 'Find products', prompt_config=PromptConfig( From 6aa80f9983dec0144c58eceea0860006b4b32bbf Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Mon, 15 Dec 2025 20:32:01 +0530 Subject: [PATCH 87/98] fixing test example skipping it for now --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index d74ab0d2c4..309b72d012 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -164,7 +164,7 @@ class ToolConfig: This allows you to override tool metadata without modifying the original tool definitions. 
Example: - ```python + ```python {test="skip"} from pydantic_ai import Agent, PromptConfig, ToolConfig agent = Agent('openai:gpt-4o') From 38f7d0db0a41f5fab5abd9385239362a7b90eaaf Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Mon, 15 Dec 2025 21:18:59 +0530 Subject: [PATCH 88/98] removing unused public methods --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 11 ----------- pydantic_ai_slim/pydantic_ai/toolsets/prepared.py | 6 +----- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 309b72d012..d845efaf79 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -201,17 +201,6 @@ def get_tool_args_for_tool(self, tool_name: str) -> dict[str, str] | None: """Get the tool argument descriptions for the given tool name.""" return self.tool_args_descriptions.get(tool_name) - def get_description_for_tool(self, tool_name: str) -> str | None: - """Get the tool description for the given tool name.""" - return self.tool_descriptions.get(tool_name) - - def get_tool_arg_description(self, tool_name: str, arg_name: str) -> str | None: - """Get the tool argument description for the given tool name and argument name.""" - tool_args = self.get_tool_args_for_tool(tool_name) - if tool_args is None: - return None - return tool_args.get(arg_name) - @dataclass class PromptConfig: diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py index 93d05461db..78bdb16f15 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py @@ -55,13 +55,9 @@ async def _get_tools_from_tool_config( original_tool = original_tools[tool_name].tool_def parameter_defs = original_tool.parameters_json_schema - if not parameter_defs: - continue for tool_name, tool_arg in parameter_defs.get('properties', {}).items(): - tool_arg_description = tool_args.get(tool_name) - if not tool_arg_description: - continue + tool_arg_description = tool_args[tool_name] tool_arg['description'] = tool_arg_description return original_tools From 308be792305dbcb8cca9295e86d801231c6e7bb7 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 13:57:56 +0530 Subject: [PATCH 89/98] adding descp --- pydantic_ai_slim/pydantic_ai/toolsets/prepared.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py index 78bdb16f15..4c69b1d608 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py @@ -56,9 +56,9 @@ async def _get_tools_from_tool_config( original_tool = original_tools[tool_name].tool_def parameter_defs = original_tool.parameters_json_schema - for tool_name, tool_arg in parameter_defs.get('properties', {}).items(): - tool_arg_description = tool_args[tool_name] - tool_arg['description'] = tool_arg_description + for param_name, param_schema in parameter_defs.get('properties', {}).items(): + if param_name in tool_args: + param_schema['description'] = tool_args[param_name] return original_tools From 635e62c7ce5a30b72907c21679fe5bec13ee078c Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 14:06:43 +0530 Subject: [PATCH 90/98] typo --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 3 ++- pydantic_ai_slim/pydantic_ai/prompt_config.py | 13 ++++++++----- 2 files changed, 10 
insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index ad417d575a..9424d998b4 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -915,8 +915,9 @@ async def process_tool_calls( # noqa: C901 yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, - content='Output tool not used - output failed validation.', + content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.output_tool_not_executed, tool_call_id=call.tool_call_id, + return_kind='output-tool-not-executed', ) output_parts.append(part) yield _messages.FunctionToolResultEvent(part) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index d845efaf79..fe1dd8974c 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -102,14 +102,14 @@ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) return message_part elif isinstance(message_part, RetryPromptPart): template = self._get_template_for_retry(message_part) - return self._apply_retry_tempelate(message_part, ctx, template) + return self._apply_retry_template(message_part, ctx, template) return message_part # Returns the original message if no template is applied def _get_template_for_retry( self, message_part: RetryPromptPart ) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]: template: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = self.default_model_retry - # This is based no RetryPromptPart.model_response() implementation + # This is based on RetryPromptPart.model_response() implementation # We follow the same structure here to populate the correct template if isinstance(message_part.content, str): if message_part.tool_name is None: @@ -121,7 +121,7 @@ def _get_template_for_retry( return template - def _apply_retry_tempelate( + def _apply_retry_template( self, message_part: RetryPromptPart, ctx: _RunContext[Any], @@ -148,13 +148,16 @@ def _apply_tool_template( return message_part def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str | None: - """Get the prompted output template for the given output schema.""" + """Get the prompted output template for the given output schema. + + Precedence: PromptedOutput.template (explicit) > PromptConfig template (agent-level). + """ from ._output import PromptedOutputSchema if not isinstance(output_schema, PromptedOutputSchema): return None - return self.prompted_output_template + return output_schema.template or self.prompted_output_template @dataclass From ff7061841b2f7de6981a16a2371e07457099e50b Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 14:26:28 +0530 Subject: [PATCH 91/98] PromptConfig taking priority --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index fe1dd8974c..fbfe8ca269 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -150,14 +150,18 @@ def _apply_tool_template( def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str | None: """Get the prompted output template for the given output schema. - Precedence: PromptedOutput.template (explicit) > PromptConfig template (agent-level). 
+ Precedence: PromptConfig template (if explicitly set) > PromptedOutput.template > default. """ from ._output import PromptedOutputSchema if not isinstance(output_schema, PromptedOutputSchema): return None - return output_schema.template or self.prompted_output_template + # PromptConfig takes precedence if explicitly set (different from default) + if self.prompted_output_template != DEFAULT_PROMPT_CONFIG.templates.prompted_output_template: + return self.prompted_output_template + + return output_schema.template @dataclass From 25761d46782fee43bfaa73c0ad36e5a201a6162c Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 16:23:21 +0530 Subject: [PATCH 92/98] adding coverage + test + cleanup --- pydantic_ai_slim/pydantic_ai/__init__.py | 20 +- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 46 +++-- .../pydantic_ai/agent/__init__.py | 21 +- pydantic_ai_slim/pydantic_ai/messages.py | 12 +- pydantic_ai_slim/pydantic_ai/prompt_config.py | 173 ++++++++++------ pydantic_ai_slim/pydantic_ai/tools.py | 4 +- .../pydantic_ai/toolsets/prepared.py | 12 +- tests/test_agent.py | 191 +++++++++++++++++- tests/test_toolsets.py | 28 +++ 9 files changed, 402 insertions(+), 105 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py index 97191a7ac8..1322687dfd 100644 --- a/pydantic_ai_slim/pydantic_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/__init__.py @@ -94,7 +94,18 @@ ModelProfile, ModelProfileSpec, ) -from .prompt_config import PromptConfig, PromptTemplates, ToolConfig +from .prompt_config import ( + DEFAULT_FINAL_RESULT_PROCESSED, + DEFAULT_FUNCTION_TOOL_NOT_EXECUTED, + DEFAULT_MODEL_RETRY, + DEFAULT_OUTPUT_TOOL_NOT_EXECUTED, + DEFAULT_OUTPUT_VALIDATION_FAILED, + DEFAULT_PROMPTED_OUTPUT_TEMPLATE, + DEFAULT_TOOL_CALL_DENIED, + PromptConfig, + PromptTemplates, + ToolConfig, +) from .run import AgentRun, AgentRunResult, AgentRunResultEvent from .settings import ModelSettings from .tools import DeferredToolRequests, DeferredToolResults, RunContext, Tool, ToolApproved, ToolDefinition, ToolDenied @@ -234,6 +245,13 @@ 'PromptConfig', 'PromptTemplates', 'ToolConfig', + 'DEFAULT_FINAL_RESULT_PROCESSED', + 'DEFAULT_FUNCTION_TOOL_NOT_EXECUTED', + 'DEFAULT_MODEL_RETRY', + 'DEFAULT_OUTPUT_TOOL_NOT_EXECUTED', + 'DEFAULT_OUTPUT_VALIDATION_FAILED', + 'DEFAULT_PROMPTED_OUTPUT_TEMPLATE', + 'DEFAULT_TOOL_CALL_DENIED', # format_prompt 'format_as_xml', # settings diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 9424d998b4..0214516d52 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -142,9 +142,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]): model: models.Model model_settings: ModelSettings | None - prompt_config: _prompt_config.PromptConfig = dataclasses.field( - default_factory=lambda: _prompt_config.DEFAULT_PROMPT_CONFIG - ) + prompt_config: _prompt_config.PromptConfig | None = None usage_limits: _usage.UsageLimits max_result_retries: int end_strategy: EndStrategy @@ -391,8 +389,16 @@ async def _prepare_request_parameters( """Build tools and create an agent model.""" output_schema = ctx.deps.output_schema - prompt_config = ctx.deps.prompt_config - prompted_output_template = prompt_config.templates.get_prompted_output_template(output_schema) + # Get the prompted output template with precedence: + # PromptConfig template > PromptedOutput.template > default + prompted_output_template: str | None = None + if
isinstance(output_schema, _output.PromptedOutputSchema): + prompt_config = ctx.deps.prompt_config + templates = prompt_config.templates if prompt_config else None + config_template = templates.prompted_output_template if templates else None + prompted_output_template = ( + config_template or output_schema.template or _prompt_config.DEFAULT_PROMPTED_OUTPUT_TEMPLATE + ) function_tools: list[ToolDefinition] = [] output_tools: list[ToolDefinition] = [] @@ -517,9 +523,12 @@ async def _prepare_request( prompt_config = ctx.deps.prompt_config - message_history = _apply_prompt_templates_to_message_history( - message_history, prompt_config.templates, run_context - ) + # Only apply templates if explicitly configured - when prompt_config or templates is None, + # message parts already have default values set at creation time + if prompt_config and prompt_config.templates is not None: + message_history = _apply_prompt_templates_to_message_history( + message_history, prompt_config.templates, run_context + ) ctx.state.message_history[:] = message_history @@ -799,8 +808,12 @@ def _handle_final_result( # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: - run_ctx = build_run_context(ctx) - tool_responses = [ctx.deps.prompt_config.templates.apply_template(part, run_ctx) for part in tool_responses] + # Only apply templates if explicitly configured + if ctx.deps.prompt_config and ctx.deps.prompt_config.templates is not None: + run_ctx = build_run_context(ctx) + tool_responses = [ + ctx.deps.prompt_config.templates.apply_template(part, run_ctx) for part in tool_responses + ] messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id)) return End(final_result) @@ -886,7 +899,7 @@ async def process_tool_calls( # noqa: C901 if final_result and final_result.tool_call_id == call.tool_call_id: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.final_result_processed, + content=_prompt_config.DEFAULT_FINAL_RESULT_PROCESSED, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -896,7 +909,7 @@ async def process_tool_calls( # noqa: C901 yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.output_tool_not_executed, + content=_prompt_config.DEFAULT_OUTPUT_TOOL_NOT_EXECUTED, tool_call_id=call.tool_call_id, return_kind='output-tool-not-executed', ) @@ -915,9 +928,8 @@ async def process_tool_calls( # noqa: C901 yield _messages.FunctionToolCallEvent(call) part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.output_tool_not_executed, + content=_prompt_config.DEFAULT_OUTPUT_VALIDATION_FAILED, tool_call_id=call.tool_call_id, - return_kind='output-tool-not-executed', ) output_parts.append(part) yield _messages.FunctionToolResultEvent(part) @@ -940,7 +952,7 @@ async def process_tool_calls( # noqa: C901 else: part = _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.final_result_processed, + content=_prompt_config.DEFAULT_FINAL_RESULT_PROCESSED, tool_call_id=call.tool_call_id, return_kind='final-result-processed', ) @@ -957,7 +969,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.function_tool_not_executed, + 
content=_prompt_config.DEFAULT_FUNCTION_TOOL_NOT_EXECUTED, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) @@ -1016,7 +1028,7 @@ async def process_tool_calls( # noqa: C901 output_parts.append( _messages.ToolReturnPart( tool_name=call.tool_name, - content=_prompt_config.DEFAULT_PROMPT_CONFIG.templates.function_tool_not_executed, + content=_prompt_config.DEFAULT_FUNCTION_TOOL_NOT_EXECUTED, tool_call_id=call.tool_call_id, return_kind='function-tool-not-executed', ) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index c0b9624096..43f4521c77 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -1378,17 +1378,19 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: else: return deps - def _get_prompt_config(self, prompt_config: _prompt_config.PromptConfig | None) -> _prompt_config.PromptConfig: + def _get_prompt_config( + self, prompt_config: _prompt_config.PromptConfig | None + ) -> _prompt_config.PromptConfig | None: """Get prompt_config for a run. If we've overridden prompt_config via `_override_prompt_config`, use that, - otherwise use the prompt_config passed to the call, falling back to the agent default, - and finally falling back to the global default. + otherwise use the prompt_config passed to the call, falling back to the agent default. + Returns None if no prompt_config is configured at any level. """ if some_prompt_config := self._override_prompt_config.get(): return some_prompt_config.value else: - return prompt_config or self.prompt_config or _prompt_config.DEFAULT_PROMPT_CONFIG + return prompt_config or self.prompt_config def _normalize_instructions( self, @@ -1452,7 +1454,16 @@ def copy_dynamic_toolsets(toolset: AbstractToolset[AgentDepsT]) -> AbstractTools return toolset toolset = toolset.visit_and_replace(copy_dynamic_toolsets) - tool_config = self._get_prompt_config(prompt_config).tool_config + + # Resolve tool_config from the prompt_config precedence chain: + # 1. Context override (agent.override(prompt_config=...)) + # 2. Per-call parameter (agent.run(..., prompt_config=...)) + # 3. Agent-level default (Agent(..., prompt_config=...)) + tool_config = ( + effective_prompt_config.tool_config + if (effective_prompt_config := self._get_prompt_config(prompt_config)) + else None + ) if self._prepare_tools or tool_config: toolset = PreparedToolset(toolset, self._prepare_tools, tool_config=tool_config) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index ab9249db3a..ab587ab2c6 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -916,12 +916,6 @@ class BuiltinToolReturnPart(BaseToolReturnPart): error_details_ta = pydantic.TypeAdapter(list[pydantic_core.ErrorDetails], config=pydantic.ConfigDict(defer_build=True)) -def _get_default_model_retry_message() -> str: - from .prompt_config import DEFAULT_PROMPT_CONFIG - - return cast(str, DEFAULT_PROMPT_CONFIG.templates.default_model_retry) - - @dataclass(repr=False) class RetryPromptPart: """A message back to a model asking it to try again. 
@@ -962,7 +956,7 @@ class RetryPromptPart: part_kind: Literal['retry-prompt'] = 'retry-prompt' """Part type identifier, this is available on all parts as a discriminator.""" - retry_message: str | None = field(default_factory=_get_default_model_retry_message) + retry_message: str | None = None """The retry message rendered using the user's prompt template. It is populated after checking the conditions for the retry so that the correct template is used.""" def model_response(self) -> str: @@ -978,6 +972,10 @@ def model_response(self) -> str: description = ( f'{len(self.content)} validation error{"s" if plural else ""}:\n```json\n{json_errors.decode()}\n```' ) + + if self.retry_message is None: + from .prompt_config import DEFAULT_MODEL_RETRY + return f'{description}\n\n{DEFAULT_MODEL_RETRY}' return f'{description}\n\n{self.retry_message}' diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index fbfe8ca269..30125cdbef 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -5,12 +5,41 @@ from textwrap import dedent from typing import TYPE_CHECKING, Any +from .messages import ModelRequestPart, RetryPromptPart, ToolReturnPart + if TYPE_CHECKING: - from ._output import OutputSchema from ._run_context import RunContext as _RunContext -from .messages import ModelRequestPart, RetryPromptPart, ToolReturnPart +# Default template strings - used when template field is None +DEFAULT_FINAL_RESULT_PROCESSED = 'Final result processed.' +"""Default confirmation message when a final result is successfully processed.""" + +DEFAULT_OUTPUT_TOOL_NOT_EXECUTED = 'Output tool not used - a final result was already processed.' +"""Default message when an output tool call is skipped because a result was already found.""" + +DEFAULT_OUTPUT_VALIDATION_FAILED = 'Output tool not used - output failed validation.' +"""Default message when an output tool fails validation but another output tool already succeeded.""" + +DEFAULT_FUNCTION_TOOL_NOT_EXECUTED = 'Tool not executed - a final result was already processed.' +"""Default message when a function tool call is skipped because a result was already found.""" + +DEFAULT_TOOL_CALL_DENIED = 'The tool call was denied.' +"""Default message when a tool call is denied by an approval handler.""" + +DEFAULT_MODEL_RETRY = 'Fix the errors and try again.' +"""Default message appended to retry prompts.""" + +DEFAULT_PROMPTED_OUTPUT_TEMPLATE = dedent( + """ + Always respond with a JSON object that's compatible with this schema: + + {schema} + + Don't include any text or Markdown fencing before or after. + """ +) +"""Default template for prompted output schema instructions.""" @dataclass @@ -18,6 +47,7 @@ class PromptTemplates: """Templates for customizing system-generated messages that Pydantic AI sends to models. Each template can be either: + - `None` to use the default message (or preserve existing content for `tool_call_denied`) - A static string that replaces the default message - A callable that receives the message part and [`RunContext`][pydantic_ai.RunContext] and returns a dynamically generated string @@ -44,71 +74,90 @@ class PromptTemplates: ``` """ - final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'Final result processed.' 
- """Confirmation message sent when a final result is successfully processed.""" + final_result_processed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + """Confirmation message sent when a final result is successfully processed. + + If `None`, uses the default: 'Final result processed.' + """ - output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = ( - 'Output tool not used - a final result was already processed.' - ) - """Message sent when an output tool call is skipped because a result was already found.""" + output_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + """Message sent when an output tool call is skipped because a result was already found. - function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = ( - 'Tool not executed - a final result was already processed.' - ) - """Message sent when a function tool call is skipped because a result was already found.""" + If `None`, uses the default: 'Output tool not used - a final result was already processed.' + """ + + function_tool_not_executed: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None + """Message sent when a function tool call is skipped because a result was already found. + + If `None`, uses the default: 'Tool not executed - a final result was already processed.' + """ - tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] = 'The tool call was denied.' + tool_call_denied: str | Callable[[ToolReturnPart, _RunContext[Any]], str] | None = None """Message sent when a tool call is denied by an approval handler. - Note: Custom messages set via `ToolDenied` are preserved unless this template is explicitly overridden. + If `None`, preserves the custom message from `ToolDenied` (or uses the default if none was set). + Set explicitly to override all denied tool messages. """ - default_model_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' - """Default message sent when a `ModelRetry` exception is raised.""" + validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None + """Message appended to validation errors when asking the model to retry. - validation_errors_retry: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' - """Message appended to validation errors when asking the model to retry.""" + If `None`, uses the default: 'Fix the errors and try again.' + """ - model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = 'Fix the errors and try again.' - """Message sent when a `ModelRetry` exception is raised from a tool.""" + model_retry_string_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None + """Message sent when a `ModelRetry` exception is raised from a tool. - model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = ( - 'Fix the errors and try again.' - ) - """Message sent when a `ModelRetry` exception is raised outside of a tool context.""" + If `None`, uses the default: 'Fix the errors and try again.' + """ - prompted_output_template: str = dedent( - """ - Always respond with a JSON object that's compatible with this schema: + model_retry_string_no_tool: str | Callable[[RetryPromptPart, _RunContext[Any]], str] | None = None + """Message sent when a `ModelRetry` exception is raised outside of a tool context. 
- {schema} + If `None`, uses the default: 'Fix the errors and try again.' + """ - Don't include any text or Markdown fencing before or after. - """ - ) + prompted_output_template: str | None = None + """Template for prompted output schema instructions. + + If `None`, uses the template from `PromptedOutput` if set, otherwise uses the default template. + Set explicitly to override the template for all prompted outputs. + """ def apply_template(self, message_part: ModelRequestPart, ctx: _RunContext[Any]) -> ModelRequestPart: if isinstance(message_part, ToolReturnPart): if message_part.return_kind == 'final-result-processed': - return self._apply_tool_template(message_part, ctx, self.final_result_processed) + template = ( + self.final_result_processed + if self.final_result_processed is not None + else DEFAULT_FINAL_RESULT_PROCESSED + ) + message_part = self._apply_tool_template(message_part, ctx, template) elif message_part.return_kind == 'output-tool-not-executed': - return self._apply_tool_template(message_part, ctx, self.output_tool_not_executed) + template = ( + self.output_tool_not_executed + if self.output_tool_not_executed is not None + else DEFAULT_OUTPUT_TOOL_NOT_EXECUTED + ) + message_part = self._apply_tool_template(message_part, ctx, template) elif message_part.return_kind == 'function-tool-not-executed': - return self._apply_tool_template(message_part, ctx, self.function_tool_not_executed) + template = ( + self.function_tool_not_executed + if self.function_tool_not_executed is not None + else DEFAULT_FUNCTION_TOOL_NOT_EXECUTED + ) + message_part = self._apply_tool_template(message_part, ctx, template) elif message_part.return_kind == 'tool-denied': - # The content may already have a custom message from ToolDenied in which case we should not override it - if self.tool_call_denied != DEFAULT_PROMPT_CONFIG.templates.tool_call_denied: - return self._apply_tool_template(message_part, ctx, self.tool_call_denied) - return message_part + if self.tool_call_denied is not None: + message_part = self._apply_tool_template(message_part, ctx, self.tool_call_denied) elif isinstance(message_part, RetryPromptPart): template = self._get_template_for_retry(message_part) - return self._apply_retry_template(message_part, ctx, template) - return message_part # Returns the original message if no template is applied + message_part = self._apply_retry_template(message_part, ctx, template) + return message_part def _get_template_for_retry( self, message_part: RetryPromptPart ) -> str | Callable[[RetryPromptPart, _RunContext[Any]], str]: - template: str | Callable[[RetryPromptPart, _RunContext[Any]], str] = self.default_model_retry # This is based on RetryPromptPart.model_response() implementation # We follow the same structure here to populate the correct template if isinstance(message_part.content, str): @@ -119,6 +168,9 @@ def _get_template_for_retry( else: template = self.validation_errors_retry + if template is None: + template = DEFAULT_MODEL_RETRY + return template def _apply_retry_template( @@ -147,22 +199,6 @@ def _apply_tool_template( message_part = replace(message_part, content=template(message_part, ctx)) return message_part - def get_prompted_output_template(self, output_schema: OutputSchema[Any]) -> str | None: - """Get the prompted output template for the given output schema. - - Precedence: PromptConfig template (if explicitly set) > PromptedOutput.template > default. 
- """ - from ._output import PromptedOutputSchema - - if not isinstance(output_schema, PromptedOutputSchema): - return None - - # PromptConfig takes precedence if explicitly set (different from default) - if self.prompted_output_template != DEFAULT_PROMPT_CONFIG.templates.prompted_output_template: - return self.prompted_output_template - - return output_schema.template - @dataclass class ToolConfig: @@ -218,12 +254,16 @@ class PromptConfig: - **Prompt Templates**: Messages for retry prompts, tool return confirmations, validation errors, and other system-generated text via [`PromptTemplates`][pydantic_ai.PromptTemplates]. - - **Tool Configuration** (planned): Tool descriptions, parameter descriptions, and other + - **Tool Configuration**: Tool descriptions, parameter descriptions, and other tool metadata - allowing you to override descriptions and args for tools at the agent level. This allows you to fully customize how your agent communicates with the model without modifying the underlying tool or agent code. + Note: + At least one of `templates` or `tool_config` must be provided. Creating a + `PromptConfig()` with no arguments will raise a `ValueError`. + Example: ```python from pydantic_ai import Agent, PromptConfig, PromptTemplates @@ -242,9 +282,10 @@ class PromptConfig: Attributes: templates: Templates for customizing system-generated messages like retry prompts, tool return confirmations, and validation error messages. + tool_config: Configuration for customizing tool descriptions and metadata. """ - templates: PromptTemplates = field(default_factory=PromptTemplates) + templates: PromptTemplates | None = None """Templates for customizing system-generated messages sent to the model. See [`PromptTemplates`][pydantic_ai.PromptTemplates] for available template options. @@ -255,10 +296,10 @@ class PromptConfig: See [`ToolConfig`][pydantic_ai.ToolConfig] for available configuration options. """ - -DEFAULT_PROMPT_CONFIG = PromptConfig() -"""The default prompt configuration used when no custom configuration is provided. - -This uses the default [`PromptTemplates`][pydantic_ai.PromptTemplates] with sensible -defaults for all system-generated messages. -""" + def __post_init__(self): + if self.templates is None and self.tool_config is None: + raise ValueError( + "PromptConfig requires at least 'templates' or 'tool_config' to be provided. " + 'Use PromptConfig(templates=PromptTemplates()) for default template behavior, ' + 'or PromptConfig(tool_config=ToolConfig(...)) for tool customization.' 
+ ) diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index f4d55b3b83..b23b7cb06b 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -14,7 +14,7 @@ from .builtin_tools import AbstractBuiltinTool from .exceptions import ModelRetry from .messages import RetryPromptPart, ToolCallPart, ToolReturn -from .prompt_config import DEFAULT_PROMPT_CONFIG +from .prompt_config import DEFAULT_TOOL_CALL_DENIED __all__ = ( 'AgentDepsT', @@ -177,7 +177,7 @@ class ToolApproved: class ToolDenied: """Indicates that a tool call has been denied and that a denial message should be returned to the model.""" - message: str = cast(str, DEFAULT_PROMPT_CONFIG.templates.tool_call_denied) + message: str = DEFAULT_TOOL_CALL_DENIED """The message to return to the model.""" _: KW_ONLY diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py index 4c69b1d608..e6d3a435ac 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/prepared.py @@ -1,5 +1,6 @@ from __future__ import annotations +from copy import deepcopy from dataclasses import dataclass, replace from pydantic_ai.prompt_config import ToolConfig @@ -21,9 +22,7 @@ class PreparedToolset(WrapperToolset[AgentDepsT]): prepare_func: ToolsPrepareFunc[AgentDepsT] | None tool_config: ToolConfig | None = None - async def get_tools( - self, ctx: RunContext[AgentDepsT], tool_config: ToolConfig | None = None - ) -> dict[str, ToolsetTool[AgentDepsT]]: + async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: original_tools = await super().get_tools(ctx) tools_after_tool_config = await self._get_tools_from_tool_config(original_tools) @@ -53,13 +52,16 @@ async def _get_tools_from_tool_config( if not tool_args: continue - original_tool = original_tools[tool_name].tool_def - parameter_defs = original_tool.parameters_json_schema + original_tool = original_tools[tool_name] + parameter_defs = deepcopy(original_tool.tool_def.parameters_json_schema) for param_name, param_schema in parameter_defs.get('properties', {}).items(): if param_name in tool_args: param_schema['description'] = tool_args[param_name] + updated_tool_def = replace(original_tool.tool_def, parameters_json_schema=parameter_defs) + original_tools[tool_name] = replace(original_tool, tool_def=updated_tool_def) + return original_tools async def get_tools_from_prepare_func( diff --git a/tests/test_agent.py b/tests/test_agent.py index 49a917911b..9321141861 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -534,6 +534,125 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse Custom retry message override""") + def model_with_tool_retry(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + assert info.function_tools is not None + if len(messages) == 1: + return ModelResponse(parts=[ToolCallPart('retry_tool', '{}')]) + else: + return ModelResponse(parts=[TextPart('done')]) + + agent_tool_retry = Agent( + FunctionModel(model_with_tool_retry), + output_type=str, + prompt_config=PromptConfig(templates=PromptTemplates(model_retry_string_tool='Custom tool retry message')), + ) + + @agent_tool_retry.tool_plain + def retry_tool() -> str: + raise ModelRetry('Tool failed') + + result_tool_retry = agent_tool_retry.run_sync('Test') + assert result_tool_retry.output == 'done' + assert result_tool_retry.all_messages() == snapshot( + [ + ModelRequest( + 
parts=[ + UserPromptPart( + content='Test', + timestamp=IsDatetime(), + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[ToolCallPart(tool_name='retry_tool', args='{}', tool_call_id=IsStr())], + usage=RequestUsage(input_tokens=51, output_tokens=2), + model_name='function:model_with_tool_retry:', + timestamp=IsDatetime(), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + RetryPromptPart( + content='Tool failed', + tool_name='retry_tool', + tool_call_id=IsStr(), + timestamp=IsDatetime(), + retry_message='Custom tool retry message', + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[TextPart(content='done')], + usage=RequestUsage(input_tokens=57, output_tokens=3), + model_name='function:model_with_tool_retry:', + timestamp=IsDatetime(), + run_id=IsStr(), + ), + ] + ) + + # Test model_retry_string_no_tool template (RetryPromptPart with string content, no tool) + def model_with_no_tool_retry(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + if len(messages) == 1: + return ModelResponse(parts=[TextPart('invalid')]) + else: + return ModelResponse(parts=[TextPart('valid')]) + + agent_no_tool_retry = Agent( + FunctionModel(model_with_no_tool_retry), + output_type=str, + prompt_config=PromptConfig(templates=PromptTemplates(model_retry_string_no_tool='Custom no-tool retry')), + ) + + @agent_no_tool_retry.output_validator + def check_valid(ctx: RunContext[None], output: str) -> str: + if output == 'invalid': + raise ModelRetry('Output is invalid') + return output + + result_no_tool = agent_no_tool_retry.run_sync('Test') + assert result_no_tool.output == 'valid' + assert result_no_tool.all_messages() == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='Test', + timestamp=IsDatetime(), + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[TextPart(content='invalid')], + usage=RequestUsage(input_tokens=51, output_tokens=1), + model_name='function:model_with_no_tool_retry:', + timestamp=IsDatetime(), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + RetryPromptPart( + content='Output is invalid', + tool_call_id=IsStr(), + timestamp=IsDatetime(), + retry_message='Custom no-tool retry', + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[TextPart(content='valid')], + usage=RequestUsage(input_tokens=59, output_tokens=2), + model_name='function:model_with_no_tool_retry:', + timestamp=IsDatetime(), + run_id=IsStr(), + ), + ] + ) + def test_prompt_config_tool_config_descriptions(): """Test that ToolConfig.tool_descriptions updates tool descriptions at the agent level.""" @@ -5624,7 +5743,7 @@ def foo_tool(foo: Foo) -> int: 'tool_call_id': IsStr(), 'timestamp': IsStr(), 'part_kind': 'retry-prompt', - 'retry_message': 'Fix the errors and try again.', + 'retry_message': None, } ], 'instructions': None, @@ -6553,7 +6672,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon model = FunctionModel(model_function) - # Using prompt_config without setting tool_call_denied to cover line 78 in prompt_config.py + # Test with tool_call_denied template set (covers the True branch at line 139) agent = Agent( model, output_type=[str, DeferredToolRequests], @@ -6732,6 +6851,74 @@ def create_file(path: str, content: str) -> str: ] ) + def model_function_for_none_template(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + if len(messages) == 1: + return ModelResponse( + parts=[ + ToolCallPart(tool_name='protected_delete', args={'path': 'file.txt'}, tool_call_id='del_call'), + ] + ) + else: + return 
ModelResponse(parts=[TextPart('Done!')]) + + agent_no_template = Agent( + FunctionModel(model_function_for_none_template), + output_type=[str, DeferredToolRequests], + prompt_config=PromptConfig(templates=PromptTemplates(final_result_processed='Done')), + ) + + @agent_no_template.tool_plain(requires_approval=True) + def protected_delete(path: str) -> str: + return f'File {path!r} deleted' + + result_no_template = await agent_no_template.run('Delete file.txt') + result_no_template = await agent_no_template.run( + message_history=result_no_template.all_messages(), + deferred_tool_results=DeferredToolResults( + approvals={'del_call': ToolDenied('Original denial message preserved')}, + ), + ) + + assert result_no_template.all_messages() == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content='Delete file.txt', + timestamp=IsDatetime(), + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[ToolCallPart(tool_name='protected_delete', args={'path': 'file.txt'}, tool_call_id='del_call')], + usage=RequestUsage(input_tokens=53, output_tokens=6), + model_name='function:model_function_for_none_template:', + timestamp=IsDatetime(), + run_id=IsStr(), + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='protected_delete', + content='Original denial message preserved', + tool_call_id='del_call', + timestamp=IsDatetime(), + return_kind='tool-denied', + ) + ], + run_id=IsStr(), + ), + ModelResponse( + parts=[TextPart(content='Done!')], + usage=RequestUsage(input_tokens=57, output_tokens=7), + model_name='function:model_function_for_none_template:', + timestamp=IsDatetime(), + run_id=IsStr(), + ), + ] + ) + async def test_run_with_deferred_tool_results_errors(): agent = Agent('test') diff --git a/tests/test_toolsets.py b/tests/test_toolsets.py index 9a4a344d12..d054e8cea0 100644 --- a/tests/test_toolsets.py +++ b/tests/test_toolsets.py @@ -25,6 +25,7 @@ from pydantic_ai._tool_manager import ToolManager from pydantic_ai.exceptions import ModelRetry, ToolRetryError, UnexpectedModelBehavior, UserError from pydantic_ai.models.test import TestModel +from pydantic_ai.prompt_config import ToolConfig from pydantic_ai.tools import ToolDefinition from pydantic_ai.toolsets._dynamic import DynamicToolset from pydantic_ai.usage import RunUsage @@ -483,6 +484,33 @@ async def prepare_add_context(ctx: RunContext[TestDeps], tool_defs: list[ToolDef ] ) + partial_args_toolset = FunctionToolset[None]() + + @partial_args_toolset.tool + def calc(x: int, y: int, z: int) -> int: + """Calculate sum""" + return x + y + z + + partial_tool_config = ToolConfig( + tool_args_descriptions={ + 'calc': { + 'x': 'First number', + 'z': 'Third number', + # 'y' intentionally missing + } + } + ) + prepared_partial = PreparedToolset(partial_args_toolset, None, tool_config=partial_tool_config) + partial_context = build_run_context(None) + partial_manager = await ToolManager[None](prepared_partial).for_run_step(partial_context) + + calc_def = partial_manager.tool_defs[0] + assert calc_def.name == 'calc' + # 'x' and 'z' should have descriptions, 'y' should not + assert calc_def.parameters_json_schema['properties']['x'].get('description') == 'First number' + assert 'description' not in calc_def.parameters_json_schema['properties']['y'] + assert calc_def.parameters_json_schema['properties']['z'].get('description') == 'Third number' + async def test_context_manager(): try: From 20f15cf2159d3de06a5b756b824379889f1cf7da Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 16:23:40 +0530 Subject: [PATCH 93/98] lint 
--- pydantic_ai_slim/pydantic_ai/messages.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index ab587ab2c6..c198bb2841 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -972,9 +972,10 @@ def model_response(self) -> str: description = ( f'{len(self.content)} validation error{"s" if plural else ""}:\n```json\n{json_errors.decode()}\n```' ) - + if self.retry_message is None: from .prompt_config import DEFAULT_MODEL_RETRY + return f'{description}\n\n{DEFAULT_MODEL_RETRY}' return f'{description}\n\n{self.retry_message}' From c98e35d1b8f8548a1c12ddc7dfadad20f7766590 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 16:34:49 +0530 Subject: [PATCH 94/98] moving my test to the end --- tests/test_agent_output_schemas.py | 33 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/tests/test_agent_output_schemas.py b/tests/test_agent_output_schemas.py index c187a9eb7f..f6fb90df01 100644 --- a/tests/test_agent_output_schemas.py +++ b/tests/test_agent_output_schemas.py @@ -433,22 +433,6 @@ async def test_deferred_output_json_schema(): } ) - -def test_build_instructions_appends_schema_placeholder(): - """Test that build_instructions appends {schema} when template doesn't contain it.""" - from pydantic_ai._output import OutputObjectDefinition, PromptedOutputSchema - - object_def = OutputObjectDefinition( - json_schema={'type': 'object', 'properties': {'name': {'type': 'string'}}}, - name='TestOutput', - description='A test output', - ) - template_without_schema = 'Please respond with JSON.' - - result = PromptedOutputSchema.build_instructions(template_without_schema, object_def) - assert result == snapshot( - 'Please respond with JSON.\n\n{"type": "object", "properties": {"name": {"type": "string"}}, "title": "TestOutput", "description": "A test output"}' - ) # special case of only BinaryImage and DeferredToolRequests agent = Agent('test', output_type=[BinaryImage, DeferredToolRequests]) assert agent.output_json_schema() == snapshot( @@ -567,3 +551,20 @@ def test_build_instructions_appends_schema_placeholder(): }, } ) + + +def test_build_instructions_appends_schema_placeholder(): + """Test that build_instructions appends {schema} when template doesn't contain it.""" + from pydantic_ai._output import OutputObjectDefinition, PromptedOutputSchema + + object_def = OutputObjectDefinition( + json_schema={'type': 'object', 'properties': {'name': {'type': 'string'}}}, + name='TestOutput', + description='A test output', + ) + template_without_schema = 'Please respond with JSON.' 
+ + result = PromptedOutputSchema.build_instructions(template_without_schema, object_def) + assert result == snapshot( + 'Please respond with JSON.\n\n{"type": "object", "properties": {"name": {"type": "string"}}, "title": "TestOutput", "description": "A test output"}' + ) From c39437be85d5eb70e9532f74ed13fa296027c763 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 16:44:42 +0530 Subject: [PATCH 95/98] no cover post_init --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 30125cdbef..52f86e0f33 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -296,7 +296,7 @@ class PromptConfig: See [`ToolConfig`][pydantic_ai.ToolConfig] for available configuration options. """ - def __post_init__(self): + def __post_init__(self): # pragma: no cover if self.templates is None and self.tool_config is None: raise ValueError( "PromptConfig requires at least 'templates' or 'tool_config' to be provided. " From ef8f9ac83882561e45f772900816cd78aa086229 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 16:44:55 +0530 Subject: [PATCH 96/98] no cover post_init --- pydantic_ai_slim/pydantic_ai/prompt_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 52f86e0f33..6eef5cc25f 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -296,7 +296,7 @@ class PromptConfig: See [`ToolConfig`][pydantic_ai.ToolConfig] for available configuration options. """ - def __post_init__(self): # pragma: no cover + def __post_init__(self): # pragma: no cover if self.templates is None and self.tool_config is None: raise ValueError( "PromptConfig requires at least 'templates' or 'tool_config' to be provided. " From 19768a1e90a57617858454c81be21ce6bca8c5e3 Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 17:02:02 +0530 Subject: [PATCH 97/98] fix tests that I ruined, F --- docs/agents.md | 2 +- tests/models/test_anthropic.py | 1 - tests/models/test_bedrock.py | 1 - tests/models/test_cohere.py | 1 - tests/models/test_gemini.py | 1 - tests/models/test_google.py | 2 -- tests/models/test_groq.py | 1 - tests/models/test_huggingface.py | 2 -- tests/models/test_mistral.py | 3 --- tests/models/test_model_test.py | 1 - tests/models/test_openai.py | 1 - tests/models/test_openai_responses.py | 3 --- tests/test_mcp.py | 1 - tests/test_temporal.py | 1 - tests/test_tools.py | 16 +--------------- 15 files changed, 2 insertions(+), 35 deletions(-) diff --git a/docs/agents.md b/docs/agents.md index b9126f0ec3..f4240aa908 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -788,7 +788,7 @@ from pydantic_ai.tools import RunContext def custom_retry_message(part: RetryPromptPart, ctx: RunContext) -> str: - return f'Attempt #{ctx.retries + 1}: Please fix the errors and try again.' + return f'Attempt #{ctx.retry + 1}: Please fix the errors and try again.' 
agent = Agent( 'openai:gpt-5', diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 09dc30f898..b55f6a2990 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -1156,7 +1156,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index 95adcd5f8e..205f1a43b5 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -477,7 +477,6 @@ async def get_capital(country: str) -> str: tool_name='get_capital', tool_call_id='tooluse_F8LnaCMtQ0-chKTnPhNH2g', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index 46f6d71aaa..fd589512fa 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -331,7 +331,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index dc44538fdd..e58ef36f51 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -710,7 +710,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_google.py b/tests/models/test_google.py index c718304eaf..e56b53b676 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -608,7 +608,6 @@ async def get_capital(country: str) -> str: tool_name='get_capital', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -3517,7 +3516,6 @@ class Animal(BaseModel): content='Please return text or call a tool.', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index 6bc77303ac..71074fe4bf 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -360,7 +360,6 @@ async def get_location(loc_name: str) -> str: content='Wrong location, please try again', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index 72483208ed..d111b086fd 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -389,7 +389,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -832,7 +831,6 @@ def response_validator(value: str) -> str: content='Response is invalid', tool_name=None, tool_call_id=IsStr(), - retry_message='Fix the errors and try again.', timestamp=IsNow(tz=timezone.utc), ) ], diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 43b106ac56..f7db9d5d3e 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -1162,7 +1162,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', 
timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -1324,7 +1323,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -1728,7 +1726,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_model_test.py b/tests/models/test_model_test.py index 07e8b88cf2..c6f0a30c76 100644 --- a/tests/models/test_model_test.py +++ b/tests/models/test_model_test.py @@ -237,7 +237,6 @@ async def my_ret(x: int) -> str: tool_name='my_ret', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 60de49e9d0..5e62f83b58 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -368,7 +368,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id='1', timestamp=IsNow(tz=timezone.utc), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index b87e4f485c..3aa4b86049 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -321,7 +321,6 @@ async def get_location(loc_name: str) -> str: tool_name='get_location', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_location', @@ -5963,7 +5962,6 @@ async def test_openai_responses_image_generation_tool_without_image_output( content='Please return text or call a tool.', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -6127,7 +6125,6 @@ class Animal(BaseModel): content='Please return text or include your response in a tool call.', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/test_mcp.py b/tests/test_mcp.py index 977d61fa6e..f737b20d92 100644 --- a/tests/test_mcp.py +++ b/tests/test_mcp.py @@ -1227,7 +1227,6 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent): tool_name='get_error', tool_call_id='call_rETXZWddAGZSHyVHAxptPGgc', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/test_temporal.py b/tests/test_temporal.py index af88a8f355..d51842ac0e 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -2001,7 +2001,6 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien tool_name='get_weather_in_city', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), diff --git a/tests/test_tools.py b/tests/test_tools.py index 433eb953f6..6affc4717c 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1847,7 +1847,6 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_banana', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_price', @@ -1862,7 +1861,6 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_grape', timestamp=IsDatetime(), - 
retry_message='Fix the errors and try again.', ), UserPromptPart( content='The price of apple is 10.0.', @@ -1944,7 +1942,6 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_banana', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_price', @@ -1959,7 +1956,6 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_grape', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), UserPromptPart( content='The price of apple is 10.0.', @@ -1979,7 +1975,6 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_apple', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='buy', @@ -1994,7 +1989,6 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_pear', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), UserPromptPart( content='I bought a banana', @@ -2022,7 +2016,6 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_apple', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='buy', @@ -2037,7 +2030,6 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_pear', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), UserPromptPart( content='I bought a banana', @@ -2097,7 +2089,6 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_banana', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='get_price', @@ -2112,14 +2103,12 @@ def buy(fruit: str): tool_name='get_price', tool_call_id='get_price_grape', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), RetryPromptPart( content='Apples are not available', tool_name='buy', tool_call_id='buy_apple', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), ToolReturnPart( tool_name='buy', @@ -2134,7 +2123,6 @@ def buy(fruit: str): tool_name='buy', tool_call_id='buy_pear', timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ), UserPromptPart( content='The price of apple is 10.0.', @@ -2380,7 +2368,7 @@ def test_deferred_tool_results_serializable(): 'tool_call_id': 'foo', 'timestamp': IsDatetime(), 'part_kind': 'retry-prompt', - 'retry_message': 'Fix the errors and try again.', + 'retry_message': None, }, 'any': {'foo': 'bar'}, }, @@ -2489,7 +2477,6 @@ def always_fail(ctx: RunContext[None]) -> str: tool_name='always_fail', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), @@ -2508,7 +2495,6 @@ def always_fail(ctx: RunContext[None]) -> str: tool_name='always_fail', tool_call_id=IsStr(), timestamp=IsDatetime(), - retry_message='Fix the errors and try again.', ) ], run_id=IsStr(), From 88ccdf347eb1b59e1b6400ae2fae48daca148bef Mon Sep 17 00:00:00 2001 From: adtyavrdhn Date: Tue, 16 Dec 2025 17:19:41 +0530 Subject: [PATCH 98/98] fix doc reference --- docs/agents.md | 1 - pydantic_ai_slim/pydantic_ai/_agent_graph.py | 8 ++++---- pydantic_ai_slim/pydantic_ai/prompt_config.py | 3 ++- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/agents.md b/docs/agents.md index f4240aa908..e4f2068e9c 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -1192,7 +1192,6 @@ with capture_run_messages() as messages: # (2)! 
tool_name='calc_volume', tool_call_id='pyd_ai_tool_call_id', timestamp=datetime.datetime(...), - retry_message='Fix the errors and try again.', ) ], run_id='...', diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 0214516d52..4450ea310a 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -390,15 +390,15 @@ async def _prepare_request_parameters( output_schema = ctx.deps.output_schema # Get the prompted output template with precedence: - # PromptConfig template > PromptedOutput.template > default + # PromptConfig template > PromptedOutput.template > model profile default (handled downstream) prompted_output_template: str | None = None if isinstance(output_schema, _output.PromptedOutputSchema): prompt_config = ctx.deps.prompt_config templates = prompt_config.templates if prompt_config else None config_template = templates.prompted_output_template if templates else None - prompted_output_template = ( - config_template or output_schema.template or _prompt_config.DEFAULT_PROMPTED_OUTPUT_TEMPLATE - ) + # Only use config_template if explicitly set, otherwise preserve original behavior + # and let the model apply its profile-specific default + prompted_output_template = config_template or output_schema.template function_tools: list[ToolDefinition] = [] output_tools: list[ToolDefinition] = [] diff --git a/pydantic_ai_slim/pydantic_ai/prompt_config.py b/pydantic_ai_slim/pydantic_ai/prompt_config.py index 6eef5cc25f..cae47e53b2 100644 --- a/pydantic_ai_slim/pydantic_ai/prompt_config.py +++ b/pydantic_ai_slim/pydantic_ai/prompt_config.py @@ -120,7 +120,8 @@ class PromptTemplates: prompted_output_template: str | None = None """Template for prompted output schema instructions. - If `None`, uses the template from `PromptedOutput` if set, otherwise uses the default template. + If `None`, uses the template from `PromptedOutput` if set, otherwise the model's + profile-specific default template is used. Set explicitly to override the template for all prompted outputs. """
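
Taken together, the series above leaves `PromptConfig` with `templates` and `tool_config` both optional (but at least one required), all `PromptTemplates` fields defaulting to `None`, and per-parameter descriptions applied through `ToolConfig`. A minimal sketch of how the pieces fit together at the end of the series follows; the model name, the `search` tool, and the template strings are illustrative only, not taken from the patches:

```python {test="skip"}
# Sketch only: assumes the API exactly as defined by patches 85-98 above.
from pydantic_ai import Agent, PromptConfig, PromptTemplates, ToolConfig
from pydantic_ai.messages import RetryPromptPart
from pydantic_ai.tools import RunContext


def retry_with_attempt(part: RetryPromptPart, ctx: RunContext) -> str:
    # Callable templates receive the message part and the run context.
    return f'Attempt #{ctx.retry + 1}: please fix the errors and try again.'


agent = Agent(
    'openai:gpt-4o',  # illustrative model name
    prompt_config=PromptConfig(
        # __post_init__ raises ValueError if neither templates nor
        # tool_config is provided.
        templates=PromptTemplates(
            # Fields left as None fall back to the DEFAULT_* constants,
            # e.g. DEFAULT_FINAL_RESULT_PROCESSED.
            model_retry_string_tool=retry_with_attempt,
        ),
        tool_config=ToolConfig(
            tool_descriptions={'search': 'Search the product catalog.'},
            # Shape: {'tool_name': {'arg_name': 'description'}}; args not
            # listed here (e.g. `limit`) keep their original schema.
            tool_args_descriptions={'search': {'query': 'Free-text search query'}},
        ),
    ),
)


@agent.tool_plain
def search(query: str, limit: int) -> list[str]:
    """Search for items."""
    return []


# prompt_config can also be passed per run; override > per-call >
# agent-level precedence is resolved in Agent._get_prompt_config.
result = agent.run_sync('Find products', prompt_config=PromptConfig(templates=PromptTemplates()))
```

Note that this sketch leaves most template fields as `None`, which (per patch 92) means message parts keep the `DEFAULT_*` values applied at creation time rather than being rewritten in `_prepare_request`.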