From 6e6f667d6e1adc8ab6399bd980bb8e22c4c78824 Mon Sep 17 00:00:00 2001 From: Motta Kin Date: Sun, 7 Dec 2025 23:10:03 +0100 Subject: [PATCH 1/9] #3621 - Pass s3:// file URLs directly to API in BedrockConverseModel --- .../pydantic_ai/models/bedrock.py | 15 ++++--- .../test_s3_document_url_input.yaml | 42 +++++++++++++++++++ .../test_bedrock/test_s3_image_url_input.yaml | 42 +++++++++++++++++++ .../test_bedrock/test_s3_video_url_input.yaml | 42 +++++++++++++++++++ tests/models/test_bedrock.py | 39 +++++++++++++++++ 5 files changed, 175 insertions(+), 5 deletions(-) create mode 100644 tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml create mode 100644 tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml create mode 100644 tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml diff --git a/pydantic_ai_slim/pydantic_ai/models/bedrock.py b/pydantic_ai_slim/pydantic_ai/models/bedrock.py index 5589b57d3e..8b7740f20b 100644 --- a/pydantic_ai_slim/pydantic_ai/models/bedrock.py +++ b/pydantic_ai_slim/pydantic_ai/models/bedrock.py @@ -733,12 +733,17 @@ async def _map_user_prompt( # noqa: C901 else: raise NotImplementedError('Binary content is not supported yet.') elif isinstance(item, ImageUrl | DocumentUrl | VideoUrl): - downloaded_item = await download_item(item, data_format='bytes', type_format='extension') - format = downloaded_item['data_type'] + source: dict[str, Any] + if item.url.startswith('s3://'): + source = {'s3Location': {'uri': item.url}} + else: + downloaded_item = await download_item(item, data_format='bytes', type_format='extension') + source = {'bytes': downloaded_item['data']} + if item.kind == 'image-url': format = item.media_type.split('/')[1] assert format in ('jpeg', 'png', 'gif', 'webp'), f'Unsupported image format: {format}' - image: ImageBlockTypeDef = {'format': format, 'source': {'bytes': downloaded_item['data']}} + image: ImageBlockTypeDef = {'format': format, 'source': cast(Any, source)} content.append({'image': image}) elif item.kind == 'document-url': @@ -746,7 +751,7 @@ async def _map_user_prompt( # noqa: C901 document: DocumentBlockTypeDef = { 'name': name, 'format': item.format, - 'source': {'bytes': downloaded_item['data']}, + 'source': cast(Any, source), } content.append({'document': document}) @@ -763,7 +768,7 @@ async def _map_user_prompt( # noqa: C901 'wmv', 'three_gp', ), f'Unsupported video format: {format}' - video: VideoBlockTypeDef = {'format': format, 'source': {'bytes': downloaded_item['data']}} + video: VideoBlockTypeDef = {'format': format, 'source': cast(Any, source)} content.append({'video': video}) elif isinstance(item, AudioUrl): # pragma: no cover raise NotImplementedError('Audio is not supported yet.') diff --git a/tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml b/tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml new file mode 100644 index 0000000000..b2f8afb235 --- /dev/null +++ b/tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml @@ -0,0 +1,42 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "What is the main content on this document?"}, {"document": {"format": "pdf", "name": "test-doc.pdf", "source": {"s3Location": {"uri": "s3://my-bucket/documents/test-doc.pdf"}}}}]}], "system": [{"text": "You are a helpful chatbot."}], "inferenceConfig": {}}' + headers: + amz-sdk-invocation-id: + - !!binary | + ZGQxNWI1ODItMTk4Yy00NWZhLTllZjYtODFlY2IzZmUxNWM2 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + 
content-length: + - '280' + content-type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-v2/converse + response: + headers: + connection: + - keep-alive + content-length: + - '420' + content-type: + - application/json + parsed_body: + metrics: + latencyMs: 600 + output: + message: + content: + - text: Based on the provided document, the main content discusses best practices for cloud storage and data management. + role: assistant + stopReason: end_turn + usage: + inputTokens: 35 + outputTokens: 18 + totalTokens: 53 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml b/tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml new file mode 100644 index 0000000000..18fd09e214 --- /dev/null +++ b/tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml @@ -0,0 +1,42 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "What is in this image?"}, {"image": {"format": "jpeg", "source": {"s3Location": {"uri": "s3://my-bucket/images/test-image.jpg"}}}}]}], "system": [{"text": "You are a helpful chatbot."}], "inferenceConfig": {}}' + headers: + amz-sdk-invocation-id: + - !!binary | + ZGQxNWI1ODItMTk4Yy00NWZhLTllZjYtODFlY2IzZmUxNWM2 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + content-length: + - '250' + content-type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-pro-v1%3A0/converse + response: + headers: + connection: + - keep-alive + content-length: + - '400' + content-type: + - application/json + parsed_body: + metrics: + latencyMs: 450 + output: + message: + content: + - text: The image shows a scenic landscape with mountains in the background and a clear blue sky above. + role: assistant + stopReason: end_turn + usage: + inputTokens: 25 + outputTokens: 20 + totalTokens: 45 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml b/tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml new file mode 100644 index 0000000000..a4334cb379 --- /dev/null +++ b/tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml @@ -0,0 +1,42 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "Describe this video"}, {"video": {"format": "mp4", "source": {"s3Location": {"uri": "s3://my-bucket/videos/test-video.mp4"}}}}]}], "system": [{"text": "You are a helpful chatbot."}], "inferenceConfig": {}}' + headers: + amz-sdk-invocation-id: + - !!binary | + ZGQxNWI1ODItMTk4Yy00NWZhLTllZjYtODFlY2IzZmUxNWM2 + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + content-length: + - '250' + content-type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-pro-v1%3A0/converse + response: + headers: + connection: + - keep-alive + content-length: + - '400' + content-type: + - application/json + parsed_body: + metrics: + latencyMs: 550 + output: + message: + content: + - text: The video shows a time-lapse of a sunset over the ocean with waves gently rolling onto the shore. 
+ role: assistant + stopReason: end_turn + usage: + inputTokens: 30 + outputTokens: 22 + totalTokens: 52 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index 185468021a..10c44edb4f 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -739,6 +739,45 @@ async def test_text_document_url_input(allow_model_requests: None, bedrock_provi ) +@pytest.mark.vcr() +async def test_s3_image_url_input(allow_model_requests: None, bedrock_provider: BedrockProvider): + """Test that s3:// image URLs are passed directly to Bedrock API without downloading.""" + m = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) + agent = Agent(m, system_prompt='You are a helpful chatbot.') + image_url = ImageUrl(url='s3://my-bucket/images/test-image.jpg', media_type='image/jpeg') + + result = await agent.run(['What is in this image?', image_url]) + assert result.output == snapshot( + 'The image shows a scenic landscape with mountains in the background and a clear blue sky above.' + ) + + +@pytest.mark.vcr() +async def test_s3_video_url_input(allow_model_requests: None, bedrock_provider: BedrockProvider): + """Test that s3:// video URLs are passed directly to Bedrock API.""" + m = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) + agent = Agent(m, system_prompt='You are a helpful chatbot.') + video_url = VideoUrl(url='s3://my-bucket/videos/test-video.mp4', media_type='video/mp4') + + result = await agent.run(['Describe this video', video_url]) + assert result.output == snapshot( + 'The video shows a time-lapse of a sunset over the ocean with waves gently rolling onto the shore.' + ) + + +@pytest.mark.vcr() +async def test_s3_document_url_input(allow_model_requests: None, bedrock_provider: BedrockProvider): + """Test that s3:// document URLs are passed directly to Bedrock API.""" + m = BedrockConverseModel('anthropic.claude-v2', provider=bedrock_provider) + agent = Agent(m, system_prompt='You are a helpful chatbot.') + document_url = DocumentUrl(url='s3://my-bucket/documents/test-doc.pdf', media_type='application/pdf') + + result = await agent.run(['What is the main content on this document?', document_url]) + assert result.output == snapshot( + 'Based on the provided document, the main content discusses best practices for cloud storage and data management.' 
+ ) + + @pytest.mark.vcr() async def test_text_as_binary_content_input(allow_model_requests: None, bedrock_provider: BedrockProvider): m = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) From 9631781747648ebaa7528646f90e3376189a0f4d Mon Sep 17 00:00:00 2001 From: Motta Kin Date: Wed, 10 Dec 2025 19:25:38 +0100 Subject: [PATCH 2/9] Cast source to specific type; update tests to use _map_messages --- .../pydantic_ai/models/bedrock.py | 7 +- tests/models/test_bedrock.py | 95 +++++++++++++++---- 2 files changed, 78 insertions(+), 24 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/models/bedrock.py b/pydantic_ai_slim/pydantic_ai/models/bedrock.py index 8b7740f20b..8c2a01104d 100644 --- a/pydantic_ai_slim/pydantic_ai/models/bedrock.py +++ b/pydantic_ai_slim/pydantic_ai/models/bedrock.py @@ -11,6 +11,7 @@ import anyio.to_thread from botocore.exceptions import ClientError +from mypy_boto3_bedrock_runtime.type_defs import DocumentSourceTypeDef from typing_extensions import ParamSpec, assert_never from pydantic_ai import ( @@ -743,7 +744,7 @@ async def _map_user_prompt( # noqa: C901 if item.kind == 'image-url': format = item.media_type.split('/')[1] assert format in ('jpeg', 'png', 'gif', 'webp'), f'Unsupported image format: {format}' - image: ImageBlockTypeDef = {'format': format, 'source': cast(Any, source)} + image: ImageBlockTypeDef = {'format': format, 'source': cast(DocumentSourceTypeDef, source)} content.append({'image': image}) elif item.kind == 'document-url': @@ -751,7 +752,7 @@ async def _map_user_prompt( # noqa: C901 document: DocumentBlockTypeDef = { 'name': name, 'format': item.format, - 'source': cast(Any, source), + 'source': cast(DocumentSourceTypeDef, source), } content.append({'document': document}) @@ -768,7 +769,7 @@ async def _map_user_prompt( # noqa: C901 'wmv', 'three_gp', ), f'Unsupported video format: {format}' - video: VideoBlockTypeDef = {'format': format, 'source': cast(Any, source)} + video: VideoBlockTypeDef = {'format': format, 'source': cast(DocumentSourceTypeDef, source)} content.append({'video': video}) elif isinstance(item, AudioUrl): # pragma: no cover raise NotImplementedError('Audio is not supported yet.') diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index 10c44edb4f..a90d47df95 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -739,42 +739,95 @@ async def test_text_document_url_input(allow_model_requests: None, bedrock_provi ) -@pytest.mark.vcr() -async def test_s3_image_url_input(allow_model_requests: None, bedrock_provider: BedrockProvider): +async def test_s3_image_url_input(bedrock_provider: BedrockProvider): """Test that s3:// image URLs are passed directly to Bedrock API without downloading.""" - m = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) - agent = Agent(m, system_prompt='You are a helpful chatbot.') + model = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) image_url = ImageUrl(url='s3://my-bucket/images/test-image.jpg', media_type='image/jpeg') - result = await agent.run(['What is in this image?', image_url]) - assert result.output == snapshot( - 'The image shows a scenic landscape with mountains in the background and a clear blue sky above.' 
+ req = [ + ModelRequest(parts=[UserPromptPart(content=['What is in this image?', image_url])]), + ] + + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters()) # type: ignore[reportPrivateUsage] + + assert bedrock_messages == snapshot( + [ + { + 'role': 'user', + 'content': [ + {'text': 'What is in this image?'}, + { + 'image': { + 'format': 'jpeg', + 'source': {'s3Location': {'uri': 's3://my-bucket/images/test-image.jpg'}}, + } + }, + ], + } + ] ) -@pytest.mark.vcr() -async def test_s3_video_url_input(allow_model_requests: None, bedrock_provider: BedrockProvider): +async def test_s3_video_url_input(bedrock_provider: BedrockProvider): """Test that s3:// video URLs are passed directly to Bedrock API.""" - m = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) - agent = Agent(m, system_prompt='You are a helpful chatbot.') + model = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) video_url = VideoUrl(url='s3://my-bucket/videos/test-video.mp4', media_type='video/mp4') - result = await agent.run(['Describe this video', video_url]) - assert result.output == snapshot( - 'The video shows a time-lapse of a sunset over the ocean with waves gently rolling onto the shore.' + # Create a ModelRequest with the S3 video URL + req = [ + ModelRequest(parts=[UserPromptPart(content=['Describe this video', video_url])]), + ] + + # Call the mapping function directly + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters()) # type: ignore[reportPrivateUsage] + + assert bedrock_messages == snapshot( + [ + { + 'role': 'user', + 'content': [ + {'text': 'Describe this video'}, + { + 'video': { + 'format': 'mp4', + 'source': {'s3Location': {'uri': 's3://my-bucket/videos/test-video.mp4'}}, + } + }, + ], + } + ] ) -@pytest.mark.vcr() -async def test_s3_document_url_input(allow_model_requests: None, bedrock_provider: BedrockProvider): +async def test_s3_document_url_input(bedrock_provider: BedrockProvider): """Test that s3:// document URLs are passed directly to Bedrock API.""" - m = BedrockConverseModel('anthropic.claude-v2', provider=bedrock_provider) - agent = Agent(m, system_prompt='You are a helpful chatbot.') + model = BedrockConverseModel('anthropic.claude-v2', provider=bedrock_provider) document_url = DocumentUrl(url='s3://my-bucket/documents/test-doc.pdf', media_type='application/pdf') - result = await agent.run(['What is the main content on this document?', document_url]) - assert result.output == snapshot( - 'Based on the provided document, the main content discusses best practices for cloud storage and data management.' 
+ # Create a ModelRequest with the S3 document URL + req = [ + ModelRequest(parts=[UserPromptPart(content=['What is the main content on this document?', document_url])]), + ] + + # Call the mapping function directly + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters()) # type: ignore[reportPrivateUsage] + + assert bedrock_messages == snapshot( + [ + { + 'role': 'user', + 'content': [ + {'text': 'What is the main content on this document?'}, + { + 'document': { + 'format': 'pdf', + 'name': 'Document 1', + 'source': {'s3Location': {'uri': 's3://my-bucket/documents/test-doc.pdf'}}, + } + }, + ], + } + ] ) From fb584c418000d2de4101d478cb393e0cb73c148b Mon Sep 17 00:00:00 2001 From: Mottakin Date: Fri, 12 Dec 2025 21:31:21 +0100 Subject: [PATCH 3/9] Add support for bucketOwner; update tests --- .../pydantic_ai/models/bedrock.py | 5 ++- .../test_s3_document_url_input.yaml | 42 ------------------ .../test_bedrock/test_s3_image_url_input.yaml | 42 ------------------ .../test_bedrock/test_s3_video_url_input.yaml | 42 ------------------ tests/models/test_bedrock.py | 44 ++++++++++++++++--- 5 files changed, 41 insertions(+), 134 deletions(-) delete mode 100644 tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml delete mode 100644 tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml delete mode 100644 tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml diff --git a/pydantic_ai_slim/pydantic_ai/models/bedrock.py b/pydantic_ai_slim/pydantic_ai/models/bedrock.py index 8c2a01104d..7e64ffa2e5 100644 --- a/pydantic_ai_slim/pydantic_ai/models/bedrock.py +++ b/pydantic_ai_slim/pydantic_ai/models/bedrock.py @@ -736,7 +736,10 @@ async def _map_user_prompt( # noqa: C901 elif isinstance(item, ImageUrl | DocumentUrl | VideoUrl): source: dict[str, Any] if item.url.startswith('s3://'): - source = {'s3Location': {'uri': item.url}} + s3_location: dict[str, str] = {'uri': item.url.split('?')[0]} + if '?bucketOwner=' in item.url: + s3_location['bucketOwner'] = item.url.split('?bucketOwner=')[1] + source = {'s3Location': s3_location} else: downloaded_item = await download_item(item, data_format='bytes', type_format='extension') source = {'bytes': downloaded_item['data']} diff --git a/tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml b/tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml deleted file mode 100644 index b2f8afb235..0000000000 --- a/tests/models/cassettes/test_bedrock/test_s3_document_url_input.yaml +++ /dev/null @@ -1,42 +0,0 @@ -interactions: -- request: - body: '{"messages": [{"role": "user", "content": [{"text": "What is the main content on this document?"}, {"document": {"format": "pdf", "name": "test-doc.pdf", "source": {"s3Location": {"uri": "s3://my-bucket/documents/test-doc.pdf"}}}}]}], "system": [{"text": "You are a helpful chatbot."}], "inferenceConfig": {}}' - headers: - amz-sdk-invocation-id: - - !!binary | - ZGQxNWI1ODItMTk4Yy00NWZhLTllZjYtODFlY2IzZmUxNWM2 - amz-sdk-request: - - !!binary | - YXR0ZW1wdD0x - content-length: - - '280' - content-type: - - !!binary | - YXBwbGljYXRpb24vanNvbg== - method: POST - uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-v2/converse - response: - headers: - connection: - - keep-alive - content-length: - - '420' - content-type: - - application/json - parsed_body: - metrics: - latencyMs: 600 - output: - message: - content: - - text: Based on the provided document, the main content discusses best practices for cloud storage and data 
management. - role: assistant - stopReason: end_turn - usage: - inputTokens: 35 - outputTokens: 18 - totalTokens: 53 - status: - code: 200 - message: OK -version: 1 diff --git a/tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml b/tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml deleted file mode 100644 index 18fd09e214..0000000000 --- a/tests/models/cassettes/test_bedrock/test_s3_image_url_input.yaml +++ /dev/null @@ -1,42 +0,0 @@ -interactions: -- request: - body: '{"messages": [{"role": "user", "content": [{"text": "What is in this image?"}, {"image": {"format": "jpeg", "source": {"s3Location": {"uri": "s3://my-bucket/images/test-image.jpg"}}}}]}], "system": [{"text": "You are a helpful chatbot."}], "inferenceConfig": {}}' - headers: - amz-sdk-invocation-id: - - !!binary | - ZGQxNWI1ODItMTk4Yy00NWZhLTllZjYtODFlY2IzZmUxNWM2 - amz-sdk-request: - - !!binary | - YXR0ZW1wdD0x - content-length: - - '250' - content-type: - - !!binary | - YXBwbGljYXRpb24vanNvbg== - method: POST - uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-pro-v1%3A0/converse - response: - headers: - connection: - - keep-alive - content-length: - - '400' - content-type: - - application/json - parsed_body: - metrics: - latencyMs: 450 - output: - message: - content: - - text: The image shows a scenic landscape with mountains in the background and a clear blue sky above. - role: assistant - stopReason: end_turn - usage: - inputTokens: 25 - outputTokens: 20 - totalTokens: 45 - status: - code: 200 - message: OK -version: 1 diff --git a/tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml b/tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml deleted file mode 100644 index a4334cb379..0000000000 --- a/tests/models/cassettes/test_bedrock/test_s3_video_url_input.yaml +++ /dev/null @@ -1,42 +0,0 @@ -interactions: -- request: - body: '{"messages": [{"role": "user", "content": [{"text": "Describe this video"}, {"video": {"format": "mp4", "source": {"s3Location": {"uri": "s3://my-bucket/videos/test-video.mp4"}}}}]}], "system": [{"text": "You are a helpful chatbot."}], "inferenceConfig": {}}' - headers: - amz-sdk-invocation-id: - - !!binary | - ZGQxNWI1ODItMTk4Yy00NWZhLTllZjYtODFlY2IzZmUxNWM2 - amz-sdk-request: - - !!binary | - YXR0ZW1wdD0x - content-length: - - '250' - content-type: - - !!binary | - YXBwbGljYXRpb24vanNvbg== - method: POST - uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/us.amazon.nova-pro-v1%3A0/converse - response: - headers: - connection: - - keep-alive - content-length: - - '400' - content-type: - - application/json - parsed_body: - metrics: - latencyMs: 550 - output: - message: - content: - - text: The video shows a time-lapse of a sunset over the ocean with waves gently rolling onto the shore. 
- role: assistant - stopReason: end_turn - usage: - inputTokens: 30 - outputTokens: 22 - totalTokens: 52 - status: - code: 200 - message: OK -version: 1 diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index a90d47df95..9647a0eb7c 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -748,7 +748,7 @@ async def test_s3_image_url_input(bedrock_provider: BedrockProvider): ModelRequest(parts=[UserPromptPart(content=['What is in this image?', image_url])]), ] - _, bedrock_messages = await model._map_messages(req, ModelRequestParameters()) # type: ignore[reportPrivateUsage] + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters(), None) # type: ignore[reportPrivateUsage] assert bedrock_messages == snapshot( [ @@ -773,13 +773,11 @@ async def test_s3_video_url_input(bedrock_provider: BedrockProvider): model = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) video_url = VideoUrl(url='s3://my-bucket/videos/test-video.mp4', media_type='video/mp4') - # Create a ModelRequest with the S3 video URL req = [ ModelRequest(parts=[UserPromptPart(content=['Describe this video', video_url])]), ] - # Call the mapping function directly - _, bedrock_messages = await model._map_messages(req, ModelRequestParameters()) # type: ignore[reportPrivateUsage] + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters(), None) # type: ignore[reportPrivateUsage] assert bedrock_messages == snapshot( [ @@ -804,13 +802,11 @@ async def test_s3_document_url_input(bedrock_provider: BedrockProvider): model = BedrockConverseModel('anthropic.claude-v2', provider=bedrock_provider) document_url = DocumentUrl(url='s3://my-bucket/documents/test-doc.pdf', media_type='application/pdf') - # Create a ModelRequest with the S3 document URL req = [ ModelRequest(parts=[UserPromptPart(content=['What is the main content on this document?', document_url])]), ] - # Call the mapping function directly - _, bedrock_messages = await model._map_messages(req, ModelRequestParameters()) # type: ignore[reportPrivateUsage] + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters(), None) # type: ignore[reportPrivateUsage] assert bedrock_messages == snapshot( [ @@ -831,6 +827,40 @@ async def test_s3_document_url_input(bedrock_provider: BedrockProvider): ) +async def test_s3_url_with_bucket_owner(bedrock_provider: BedrockProvider): + """Test that s3:// URLs with bucketOwner parameter are parsed correctly.""" + model = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) + image_url = ImageUrl(url='s3://my-bucket/images/test-image.jpg?bucketOwner=123456789012', media_type='image/jpeg') + + req = [ + ModelRequest(parts=[UserPromptPart(content=['What is in this image?', image_url])]), + ] + + _, bedrock_messages = await model._map_messages(req, ModelRequestParameters(), None) # type: ignore[reportPrivateUsage] + + assert bedrock_messages == snapshot( + [ + { + 'role': 'user', + 'content': [ + {'text': 'What is in this image?'}, + { + 'image': { + 'format': 'jpeg', + 'source': { + 's3Location': { + 'uri': 's3://my-bucket/images/test-image.jpg', + 'bucketOwner': '123456789012', + } + }, + } + }, + ], + } + ] + ) + + @pytest.mark.vcr() async def test_text_as_binary_content_input(allow_model_requests: None, bedrock_provider: BedrockProvider): m = BedrockConverseModel('us.amazon.nova-pro-v1:0', provider=bedrock_provider) From 87cea32070a96dd0f514244829813f6eaaae9ca6 Mon Sep 17 00:00:00 2001 From: Mottakin Date: 
Fri, 12 Dec 2025 22:29:35 +0100
Subject: [PATCH 4/9] Avoid supporting download_item from s3

---
 pydantic_ai_slim/pydantic_ai/models/__init__.py |  2 ++
 tests/models/test_download_item.py              | 16 ++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/pydantic_ai_slim/pydantic_ai/models/__init__.py b/pydantic_ai_slim/pydantic_ai/models/__init__.py
index 24ce25c3ae..7fc64ead4a 100644
--- a/pydantic_ai_slim/pydantic_ai/models/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/models/__init__.py
@@ -1172,6 +1172,8 @@ async def download_item(
     """
     if item.url.startswith('gs://'):
         raise UserError('Downloading from protocol "gs://" is not supported.')
+    elif item.url.startswith('s3://'):
+        raise UserError('Downloading from protocol "s3://" is not supported.')
     elif isinstance(item, VideoUrl) and item.is_youtube:
         raise UserError('Downloading YouTube videos is not supported.')
 
diff --git a/tests/models/test_download_item.py b/tests/models/test_download_item.py
index 845bc8473c..4087fa6f33 100644
--- a/tests/models/test_download_item.py
+++ b/tests/models/test_download_item.py
@@ -24,6 +24,22 @@ async def test_download_item_raises_user_error_with_gs_uri(
         _ = await download_item(url, data_format='bytes')
 
 
+@pytest.mark.parametrize(
+    'url',
+    (
+        pytest.param(AudioUrl(url='s3://my-bucket/audio.wav')),
+        pytest.param(DocumentUrl(url='s3://my-bucket/document.pdf')),
+        pytest.param(ImageUrl(url='s3://my-bucket/image.png')),
+        pytest.param(VideoUrl(url='s3://my-bucket/video.mp4')),
+    ),
+)
+async def test_download_item_raises_user_error_with_s3_uri(
+    url: AudioUrl | DocumentUrl | ImageUrl | VideoUrl,
+) -> None:
+    with pytest.raises(UserError, match='Downloading from protocol "s3://" is not supported.'):
+        _ = await download_item(url, data_format='bytes')
+
+
 async def test_download_item_raises_user_error_with_youtube_url() -> None:
     with pytest.raises(UserError, match='Downloading YouTube videos is not supported.'):
         _ = await download_item(VideoUrl(url='https://youtu.be/lCdaVNyHtjU'), data_format='bytes')

From a77eeaab7d55ea2b8c999f46654afe81bf2822e6 Mon Sep 17 00:00:00 2001
From: Mottakin
Date: Fri, 12 Dec 2025 23:11:28 +0100
Subject: [PATCH 5/9] Update input.md

---
 docs/input.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/input.md b/docs/input.md
index f60516f373..5c5eef63e9 100644
--- a/docs/input.md
+++ b/docs/input.md
@@ -112,7 +112,7 @@ Some model APIs do not support file URLs at all or for specific file types. In t
 - [`OpenAIResponsesModel`][pydantic_ai.models.openai.OpenAIResponsesModel]: All URLs
 - [`AnthropicModel`][pydantic_ai.models.anthropic.AnthropicModel]: `DocumentUrl` with media type `text/plain`
 - [`GoogleModel`][pydantic_ai.models.google.GoogleModel] using GLA (Gemini Developer API): All URLs except YouTube video URLs and files uploaded to the [Files API](https://ai.google.dev/gemini-api/docs/files).
-- [`BedrockConverseModel`][pydantic_ai.models.bedrock.BedrockConverseModel]: All URLs
+- [`BedrockConverseModel`][pydantic_ai.models.bedrock.BedrockConverseModel]: All URLs except the ones uploaded to S3, specifically starting with `s3://`.
 
 If the model API supports file URLs but may not be able to download a file because of crawling or access restrictions, you can instruct Pydantic AI to download the file content and send that instead of the URL by enabling the `force_download` flag on the URL object. For example, [`GoogleModel`][pydantic_ai.models.google.GoogleModel] on Vertex AI limits YouTube video URLs to one URL per request.
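For illustration, here is a minimal usage sketch of the behavior this series adds: passing an `s3://` URL straight through to Bedrock. The bucket, key, and prompt are hypothetical; the model ID is the one used in the tests above, and valid AWS credentials are assumed to be configured.

```python
from pydantic_ai import Agent, ImageUrl
from pydantic_ai.models.bedrock import BedrockConverseModel

# The s3:// URL is forwarded to the Converse API as an s3Location
# instead of being downloaded by Pydantic AI first.
agent = Agent(BedrockConverseModel('us.amazon.nova-pro-v1:0'))
image = ImageUrl(url='s3://my-bucket/images/test-image.jpg', media_type='image/jpeg')

result = agent.run_sync(['What is in this image?', image])
print(result.output)
```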
@@ -138,3 +138,5 @@ result = agent.run_sync(
 )
 print(result.output)
 ```
+
+For `BedrockConverseModel`, you can upload a file to S3 and pass the URL to the API directly. Pydantic AI will pass it to Bedrock as is. It expects the URL to be in the format `s3://<bucket-name>/<file-name>`. An optional `bucketOwner` query parameter can be added to the URL to specify the owner of the bucket. For example: `s3://my-bucket/my-file.png?bucketOwner=123456789012`.

From 076635ec736b78d8777da6acba38063bca01e42b Mon Sep 17 00:00:00 2001
From: Mottakin
Date: Fri, 12 Dec 2025 23:22:34 +0100
Subject: [PATCH 6/9] Fix sentence structure for doc update

---
 docs/input.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/input.md b/docs/input.md
index 5c5eef63e9..77081b9509 100644
--- a/docs/input.md
+++ b/docs/input.md
@@ -112,7 +112,7 @@ Some model APIs do not support file URLs at all or for specific file types. In t
 - [`OpenAIResponsesModel`][pydantic_ai.models.openai.OpenAIResponsesModel]: All URLs
 - [`AnthropicModel`][pydantic_ai.models.anthropic.AnthropicModel]: `DocumentUrl` with media type `text/plain`
 - [`GoogleModel`][pydantic_ai.models.google.GoogleModel] using GLA (Gemini Developer API): All URLs except YouTube video URLs and files uploaded to the [Files API](https://ai.google.dev/gemini-api/docs/files).
-- [`BedrockConverseModel`][pydantic_ai.models.bedrock.BedrockConverseModel]: All URLs except the ones uploaded to S3, specifically starting with `s3://`.
+- [`BedrockConverseModel`][pydantic_ai.models.bedrock.BedrockConverseModel]: All URLs except S3 URLs, specifically starting with `s3://`.
 
 If the model API supports file URLs but may not be able to download a file because of crawling or access restrictions, you can instruct Pydantic AI to download the file content and send that instead of the URL by enabling the `force_download` flag on the URL object. For example, [`GoogleModel`][pydantic_ai.models.google.GoogleModel] on Vertex AI limits YouTube video URLs to one URL per request.

From e4d615a44e18b02455b052673fd5aaed01031f16 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kacper=20W=C5=82odarczyk?= <111304236+DEENUU1@users.noreply.github.com>
Date: Sat, 13 Dec 2025 00:37:38 +0100
Subject: [PATCH 7/9] Add tool timeout support (#3594)

---
 docs/tools-advanced.md                        |  32 ++
 pydantic_ai_slim/pydantic_ai/_tool_manager.py |   4 +-
 .../pydantic_ai/agent/__init__.py             |  31 +-
 pydantic_ai_slim/pydantic_ai/tools.py         |  13 +
 .../pydantic_ai/toolsets/function.py          |  37 ++-
 tests/models/test_model_request_parameters.py |   2 +
 tests/test_logfire.py                         |   2 +
 tests/test_tools.py                           | 305 ++++++++++++++++++
 8 files changed, 418 insertions(+), 8 deletions(-)

diff --git a/docs/tools-advanced.md b/docs/tools-advanced.md
index 5b6d6afea4..d3e6f0d8bd 100644
--- a/docs/tools-advanced.md
+++ b/docs/tools-advanced.md
@@ -371,6 +371,38 @@ def my_flaky_tool(query: str) -> str:
 
 Raising `ModelRetry` also generates a `RetryPromptPart` containing the exception message, which is sent back to the LLM to guide its next attempt. Both `ValidationError` and `ModelRetry` respect the `retries` setting configured on the `Tool` or `Agent`.
 
+### Tool Timeout
+
+You can set a timeout for tool execution to prevent tools from running indefinitely. If a tool exceeds its timeout, it is treated as a failure and a retry prompt is sent to the model (counting towards the retry limit).
+ +```python +import asyncio + +from pydantic_ai import Agent + +# Set a default timeout for all tools on the agent +agent = Agent('test', tool_timeout=30) + + +@agent.tool_plain +async def slow_tool() -> str: + """This tool will use the agent's default timeout (30 seconds).""" + await asyncio.sleep(10) + return 'Done' + + +@agent.tool_plain(timeout=5) +async def fast_tool() -> str: + """This tool has its own timeout (5 seconds) that overrides the agent default.""" + await asyncio.sleep(1) + return 'Done' +``` + +- **Agent-level timeout**: Set `tool_timeout` on the [`Agent`][pydantic_ai.Agent] to apply a default timeout to all tools. +- **Per-tool timeout**: Set `timeout` on individual tools via [`@agent.tool`][pydantic_ai.Agent.tool], [`@agent.tool_plain`][pydantic_ai.Agent.tool_plain], or the [`Tool`][pydantic_ai.tools.Tool] dataclass. This overrides the agent-level default. + +When a timeout occurs, the tool is considered to have failed and the model receives a retry prompt with the message `"Timed out after {timeout} seconds."`. This counts towards the tool's retry limit just like validation errors or explicit [`ModelRetry`][pydantic_ai.exceptions.ModelRetry] exceptions. + ### Parallel tool calls & concurrency When a model returns multiple tool calls in one response, Pydantic AI schedules them concurrently using `asyncio.create_task`. diff --git a/pydantic_ai_slim/pydantic_ai/_tool_manager.py b/pydantic_ai_slim/pydantic_ai/_tool_manager.py index 9a9f93e1ff..a2730bd852 100644 --- a/pydantic_ai_slim/pydantic_ai/_tool_manager.py +++ b/pydantic_ai_slim/pydantic_ai/_tool_manager.py @@ -172,9 +172,7 @@ async def _call_tool( call.args or {}, allow_partial=pyd_allow_partial, context=ctx.validation_context ) - result = await self.toolset.call_tool(name, args_dict, ctx, tool) - - return result + return await self.toolset.call_tool(name, args_dict, ctx, tool) except (ValidationError, ModelRetry) as e: max_retries = tool.max_retries if tool is not None else 1 current_retry = self.ctx.retries.get(name, 0) diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 863ffe77c4..85ed332d0b 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -152,6 +152,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]): _prepare_output_tools: ToolsPrepareFunc[AgentDepsT] | None = dataclasses.field(repr=False) _max_result_retries: int = dataclasses.field(repr=False) _max_tool_retries: int = dataclasses.field(repr=False) + _tool_timeout: float | None = dataclasses.field(repr=False) _validation_context: Any | Callable[[RunContext[AgentDepsT]], Any] = dataclasses.field(repr=False) _event_stream_handler: EventStreamHandler[AgentDepsT] | None = dataclasses.field(repr=False) @@ -184,6 +185,7 @@ def __init__( instrument: InstrumentationSettings | bool | None = None, history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None, event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + tool_timeout: float | None = None, ) -> None: ... @overload @@ -211,6 +213,7 @@ def __init__( instrument: InstrumentationSettings | bool | None = None, history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None, event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + tool_timeout: float | None = None, ) -> None: ... 
def __init__( @@ -236,6 +239,7 @@ def __init__( instrument: InstrumentationSettings | bool | None = None, history_processors: Sequence[HistoryProcessor[AgentDepsT]] | None = None, event_stream_handler: EventStreamHandler[AgentDepsT] | None = None, + tool_timeout: float | None = None, **_deprecated_kwargs: Any, ): """Create an agent. @@ -290,6 +294,9 @@ def __init__( Each processor takes a list of messages and returns a modified list of messages. Processors can be sync or async and are applied in sequence. event_stream_handler: Optional handler for events from the model's streaming response and the agent's execution of tools. + tool_timeout: Default timeout in seconds for tool execution. If a tool takes longer than this, + the tool is considered to have failed and a retry prompt is returned to the model (counting towards the retry limit). + Individual tools can override this with their own timeout. Defaults to None (no timeout). """ if model is None or defer_model_check: self._model = model @@ -323,6 +330,7 @@ def __init__( self._max_result_retries = output_retries if output_retries is not None else retries self._max_tool_retries = retries + self._tool_timeout = tool_timeout self._validation_context = validation_context @@ -336,7 +344,10 @@ def __init__( self._output_toolset.max_retries = self._max_result_retries self._function_toolset = _AgentFunctionToolset( - tools, max_retries=self._max_tool_retries, output_schema=self._output_schema + tools, + max_retries=self._max_tool_retries, + timeout=self._tool_timeout, + output_schema=self._output_schema, ) self._dynamic_toolsets = [ DynamicToolset[AgentDepsT](toolset_func=toolset) @@ -1036,6 +1047,7 @@ def tool( sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> Callable[[ToolFuncContext[AgentDepsT, ToolParams]], ToolFuncContext[AgentDepsT, ToolParams]]: ... def tool( @@ -1054,6 +1066,7 @@ def tool( sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> Any: """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument. @@ -1103,6 +1116,8 @@ async def spam(ctx: RunContext[str], y: float) -> float: requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. + timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. + Overrides the agent-level `tool_timeout` if set. Defaults to None (no timeout). """ def tool_decorator( @@ -1123,6 +1138,7 @@ def tool_decorator( sequential=sequential, requires_approval=requires_approval, metadata=metadata, + timeout=timeout, ) return func_ @@ -1147,6 +1163,7 @@ def tool_plain( sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> Callable[[ToolFuncPlain[ToolParams]], ToolFuncPlain[ToolParams]]: ... def tool_plain( @@ -1165,6 +1182,7 @@ def tool_plain( sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> Any: """Decorator to register a tool function which DOES NOT take `RunContext` as an argument. 
@@ -1214,6 +1232,8 @@ async def spam(ctx: RunContext[str]) -> float: requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. + timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. + Overrides the agent-level `tool_timeout` if set. Defaults to None (no timeout). """ def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams]: @@ -1232,6 +1252,7 @@ def tool_decorator(func_: ToolFuncPlain[ToolParams]) -> ToolFuncPlain[ToolParams sequential=sequential, requires_approval=requires_approval, metadata=metadata, + timeout=timeout, ) return func_ @@ -1409,7 +1430,10 @@ def toolsets(self) -> Sequence[AbstractToolset[AgentDepsT]]: if some_tools := self._override_tools.get(): function_toolset = _AgentFunctionToolset( - some_tools.value, max_retries=self._max_tool_retries, output_schema=self._output_schema + some_tools.value, + max_retries=self._max_tool_retries, + timeout=self._tool_timeout, + output_schema=self._output_schema, ) else: function_toolset = self._function_toolset @@ -1516,11 +1540,12 @@ def __init__( tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = [], *, max_retries: int = 1, + timeout: float | None = None, id: str | None = None, output_schema: _output.OutputSchema[Any], ): self.output_schema = output_schema - super().__init__(tools, max_retries=max_retries, id=id) + super().__init__(tools, max_retries=max_retries, timeout=timeout, id=id) @property def id(self) -> str: diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py index dcd860b019..900278ce44 100644 --- a/pydantic_ai_slim/pydantic_ai/tools.py +++ b/pydantic_ai_slim/pydantic_ai/tools.py @@ -273,6 +273,7 @@ class Tool(Generic[ToolAgentDepsT]): sequential: bool requires_approval: bool metadata: dict[str, Any] | None + timeout: float | None function_schema: _function_schema.FunctionSchema """ The base JSON schema for the tool's parameters. @@ -296,6 +297,7 @@ def __init__( sequential: bool = False, requires_approval: bool = False, metadata: dict[str, Any] | None = None, + timeout: float | None = None, function_schema: _function_schema.FunctionSchema | None = None, ): """Create a new tool instance. @@ -352,6 +354,8 @@ async def prep_my_tool( requires_approval: Whether this tool requires human-in-the-loop approval. Defaults to False. See the [tools documentation](../deferred-tools.md#human-in-the-loop-tool-approval) for more info. metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. + timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. + Defaults to None (no timeout). function_schema: The function schema to use for the tool. If not provided, it will be generated. 
""" self.function = function @@ -373,6 +377,7 @@ async def prep_my_tool( self.sequential = sequential self.requires_approval = requires_approval self.metadata = metadata + self.timeout = timeout @classmethod def from_schema( @@ -428,6 +433,7 @@ def tool_def(self): strict=self.strict, sequential=self.sequential, metadata=self.metadata, + timeout=self.timeout, kind='unapproved' if self.requires_approval else 'function', ) @@ -514,6 +520,13 @@ class ToolDefinition: For MCP tools, this contains the `meta`, `annotations`, and `output_schema` fields from the tool definition. """ + timeout: float | None = None + """Timeout in seconds for tool execution. + + If the tool takes longer than this, a retry prompt is returned to the model. + Defaults to None (no timeout). + """ + @property def defer(self) -> bool: """Whether calls to this tool will be deferred. diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/function.py b/pydantic_ai_slim/pydantic_ai/toolsets/function.py index e185ed0273..9655643f4b 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/function.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/function.py @@ -4,10 +4,11 @@ from dataclasses import dataclass, replace from typing import Any, overload +import anyio from pydantic.json_schema import GenerateJsonSchema from .._run_context import AgentDepsT, RunContext -from ..exceptions import UserError +from ..exceptions import ModelRetry, UserError from ..tools import ( DocstringFormat, GenerateToolJsonSchema, @@ -25,6 +26,12 @@ class FunctionToolsetTool(ToolsetTool[AgentDepsT]): call_func: Callable[[dict[str, Any], RunContext[AgentDepsT]], Awaitable[Any]] is_async: bool + timeout: float | None = None + """Timeout in seconds for tool execution. + + If the tool takes longer than this, a retry prompt is returned to the model. + Defaults to None (no timeout). + """ class FunctionToolset(AbstractToolset[AgentDepsT]): @@ -35,6 +42,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]): tools: dict[str, Tool[Any]] max_retries: int + timeout: float | None _id: str | None docstring_format: DocstringFormat require_parameter_descriptions: bool @@ -45,6 +53,7 @@ def __init__( tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] = [], *, max_retries: int = 1, + timeout: float | None = None, docstring_format: DocstringFormat = 'auto', require_parameter_descriptions: bool = False, schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema, @@ -60,6 +69,9 @@ def __init__( tools: The tools to add to the toolset. max_retries: The maximum number of retries for each tool during a run. Applies to all tools, unless overridden when adding a tool. + timeout: Timeout in seconds for tool execution. If a tool takes longer than this, + a retry prompt is returned to the model. Individual tools can override this with their own timeout. + Defaults to None (no timeout). docstring_format: Format of tool docstring, see [`DocstringFormat`][pydantic_ai.tools.DocstringFormat]. Defaults to `'auto'`, such that the format is inferred from the structure of the docstring. Applies to all tools, unless overridden when adding a tool. @@ -80,6 +92,7 @@ def __init__( in which case the ID will be used to identify the toolset's activities within the workflow. 
""" self.max_retries = max_retries + self.timeout = timeout self._id = id self.docstring_format = docstring_format self.require_parameter_descriptions = require_parameter_descriptions @@ -119,6 +132,7 @@ def tool( sequential: bool | None = None, requires_approval: bool | None = None, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> Callable[[ToolFuncEither[AgentDepsT, ToolParams]], ToolFuncEither[AgentDepsT, ToolParams]]: ... def tool( @@ -137,6 +151,7 @@ def tool( sequential: bool | None = None, requires_approval: bool | None = None, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> Any: """Decorator to register a tool function which takes [`RunContext`][pydantic_ai.tools.RunContext] as its first argument. @@ -193,6 +208,8 @@ async def spam(ctx: RunContext[str], y: float) -> float: If `None`, the default value is determined by the toolset. metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata. + timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. + Defaults to None (no timeout). """ def tool_decorator( @@ -213,6 +230,7 @@ def tool_decorator( sequential=sequential, requires_approval=requires_approval, metadata=metadata, + timeout=timeout, ) return func_ @@ -233,6 +251,7 @@ def add_function( sequential: bool | None = None, requires_approval: bool | None = None, metadata: dict[str, Any] | None = None, + timeout: float | None = None, ) -> None: """Add a function as a tool to the toolset. @@ -267,6 +286,8 @@ def add_function( If `None`, the default value is determined by the toolset. metadata: Optional metadata for the tool. This is not sent to the model but can be used for filtering and tool behavior customization. If `None`, the default value is determined by the toolset. If provided, it will be merged with the toolset's metadata. + timeout: Timeout in seconds for tool execution. If the tool takes longer, a retry prompt is returned to the model. + Defaults to None (no timeout). 
""" if docstring_format is None: docstring_format = self.docstring_format @@ -295,6 +316,7 @@ def add_function( sequential=sequential, requires_approval=requires_approval, metadata=metadata, + timeout=timeout, ) self.add_tool(tool) @@ -340,6 +362,7 @@ async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[ args_validator=tool.function_schema.validator, call_func=tool.function_schema.call, is_async=tool.function_schema.is_async, + timeout=tool_def.timeout, ) return tools @@ -347,4 +370,14 @@ async def call_tool( self, name: str, tool_args: dict[str, Any], ctx: RunContext[AgentDepsT], tool: ToolsetTool[AgentDepsT] ) -> Any: assert isinstance(tool, FunctionToolsetTool) - return await tool.call_func(tool_args, ctx) + + # Per-tool timeout takes precedence over toolset timeout + timeout = tool.timeout if tool.timeout is not None else self.timeout + if timeout is not None: + try: + with anyio.fail_after(timeout): + return await tool.call_func(tool_args, ctx) + except TimeoutError: + raise ModelRetry(f'Timed out after {timeout} seconds.') from None + else: + return await tool.call_func(tool_args, ctx) diff --git a/tests/models/test_model_request_parameters.py b/tests/models/test_model_request_parameters.py index a3f378652f..e11df81a5d 100644 --- a/tests/models/test_model_request_parameters.py +++ b/tests/models/test_model_request_parameters.py @@ -68,6 +68,7 @@ def test_model_request_parameters_are_serializable(): 'sequential': False, 'kind': 'function', 'metadata': None, + 'timeout': None, } ], 'builtin_tools': [ @@ -132,6 +133,7 @@ def test_model_request_parameters_are_serializable(): 'sequential': False, 'kind': 'function', 'metadata': None, + 'timeout': None, } ], 'prompted_output_template': None, diff --git a/tests/test_logfire.py b/tests/test_logfire.py index dadb930dd0..b33e8702e0 100644 --- a/tests/test_logfire.py +++ b/tests/test_logfire.py @@ -547,6 +547,7 @@ async def my_ret(x: int) -> str: 'sequential': False, 'kind': 'function', 'metadata': None, + 'timeout': None, } ], 'builtin_tools': [], @@ -994,6 +995,7 @@ class MyOutput: 'sequential': False, 'kind': 'output', 'metadata': None, + 'timeout': None, } ], 'prompted_output_template': None, diff --git a/tests/test_tools.py b/tests/test_tools.py index bcdf537994..0031f702cd 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -150,6 +150,7 @@ def test_docstring_google(docstring_format: Literal['google', 'auto']): 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -184,6 +185,7 @@ def test_docstring_sphinx(docstring_format: Literal['sphinx', 'auto']): 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -226,6 +228,7 @@ def test_docstring_numpy(docstring_format: Literal['numpy', 'auto']): 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -268,6 +271,7 @@ def my_tool(x: int) -> str: # pragma: no cover 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -308,6 +312,7 @@ def my_tool(x: int) -> str: # pragma: no cover 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -354,6 +359,7 @@ def my_tool(x: int) -> str: # pragma: no cover 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -388,6 +394,7 @@ def test_only_returns_type(): 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -413,6 +420,7 @@ def test_docstring_unknown(): 'kind': 'function', 'sequential': False, 'metadata': 
None, + 'timeout': None, } ) @@ -456,6 +464,7 @@ def test_docstring_google_no_body(docstring_format: Literal['google', 'auto']): 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -492,6 +501,7 @@ def takes_just_model(model: Foo) -> str: 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -537,6 +547,7 @@ def takes_just_model(model: Foo, z: int) -> str: 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -902,6 +913,7 @@ def test_suppress_griffe_logging(caplog: LogCaptureFixture): 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -974,6 +986,7 @@ def my_tool_plain(*, a: int = 1, b: int) -> int: 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, }, { 'description': None, @@ -989,6 +1002,7 @@ def my_tool_plain(*, a: int = 1, b: int) -> int: 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, }, ] ) @@ -1077,6 +1091,7 @@ def my_tool(x: Annotated[str | None, WithJsonSchema({'type': 'string'})] = None, 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, }, { 'description': None, @@ -1090,6 +1105,7 @@ def my_tool(x: Annotated[str | None, WithJsonSchema({'type': 'string'})] = None, 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, }, ] ) @@ -1127,6 +1143,7 @@ def get_score(data: Data) -> int: ... # pragma: no branch 'kind': 'function', 'sequential': False, 'metadata': None, + 'timeout': None, } ) @@ -2452,3 +2469,291 @@ def always_fail(ctx: RunContext[None]) -> str: ), ] ) + + +@pytest.mark.anyio +async def test_tool_timeout_triggers_retry(): + """Test that a slow tool triggers RetryPromptPart when timeout is exceeded.""" + import asyncio + + call_count = 0 + + async def model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + nonlocal call_count + call_count += 1 + # First call: try the slow tool + if call_count == 1: + return ModelResponse(parts=[ToolCallPart(tool_name='slow_tool', args={}, tool_call_id='call-1')]) + # After receiving retry, return text + return ModelResponse(parts=[TextPart(content='Tool timed out, giving up')]) + + agent = Agent(FunctionModel(model_logic)) + + @agent.tool_plain(timeout=0.1) + async def slow_tool() -> str: + await asyncio.sleep(1.0) # 1 second, but timeout is 0.1s + return 'done' # pragma: no cover + + result = await agent.run('call slow_tool') + + # Check that retry prompt was sent to the model + retry_parts = [ + part + for msg in result.all_messages() + if isinstance(msg, ModelRequest) + for part in msg.parts + if isinstance(part, RetryPromptPart) and 'Timed out' in str(part.content) + ] + assert len(retry_parts) == 1 + assert 'Timed out after 0.1 seconds' in retry_parts[0].content + assert retry_parts[0].tool_name == 'slow_tool' + + +@pytest.mark.anyio +async def test_tool_with_timeout_completes_successfully(): + """Test that a tool completes successfully when within its timeout.""" + import asyncio + + from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart, ToolCallPart + from pydantic_ai.models.function import AgentInfo, FunctionModel + + call_count = 0 + + async def model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + nonlocal call_count + call_count += 1 + if call_count == 1: + # First call: ask to run the slow tool + return ModelResponse( + parts=[ToolCallPart(tool_name='slow_but_allowed_tool', args={}, tool_call_id='call-1')] + ) + # Second call: 
tool completed successfully, return final response + return ModelResponse(parts=[TextPart(content='Tool completed successfully')]) + + agent = Agent(FunctionModel(model_logic)) + + @agent.tool_plain(timeout=5.0) # 5s per-tool timeout + async def slow_but_allowed_tool() -> str: + await asyncio.sleep(0.2) # 200ms - within 5s timeout + return 'completed successfully' + + result = await agent.run('call slow_but_allowed_tool') + + # Should NOT have any retry prompts since tool completed within timeout + retry_parts = [ + part + for msg in result.all_messages() + if isinstance(msg, ModelRequest) + for part in msg.parts + if isinstance(part, RetryPromptPart) and 'Timed out' in str(part.content) + ] + assert len(retry_parts) == 0 + assert 'completed successfully' in result.output + + +@pytest.mark.anyio +async def test_no_timeout_by_default(): + """Test that tools run without timeout by default (backward compatible).""" + import asyncio + + agent = Agent(TestModel()) # No tool_timeout specified + + @agent.tool_plain + async def normal_tool() -> str: + await asyncio.sleep(0.1) + return 'completed' + + result = await agent.run('call normal_tool') + + # Should complete normally without timeout + assert 'completed' in result.output + + +@pytest.mark.anyio +async def test_tool_timeout_retry_counts_as_failed(): + """Test that timeout counts toward tool retry limit.""" + import asyncio + + agent = Agent(TestModel(), retries=2) + + call_count = 0 + + @agent.tool_plain(timeout=0.05) + async def flaky_tool() -> str: + nonlocal call_count + call_count += 1 + if call_count < 3: + await asyncio.sleep(1.0) # Will timeout + return 'finally done' + + await agent.run('call flaky_tool') + + # Tool should have been called 3 times (initial + 2 retries) + assert call_count == 3 + + +@pytest.mark.anyio +async def test_tool_timeout_message_format(): + """Test the format of the retry prompt message on timeout.""" + import asyncio + + call_count = 0 + + async def model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + nonlocal call_count + call_count += 1 + if call_count == 1: + return ModelResponse(parts=[ToolCallPart(tool_name='my_slow_tool', args={}, tool_call_id='call-1')]) + return ModelResponse(parts=[TextPart(content='done')]) + + agent = Agent(FunctionModel(model_logic)) + + @agent.tool_plain(timeout=0.1) + async def my_slow_tool() -> str: + await asyncio.sleep(1.0) + return 'done' # pragma: no cover + + result = await agent.run('call my_slow_tool') + + retry_parts = [ + part + for msg in result.all_messages() + if isinstance(msg, ModelRequest) + for part in msg.parts + if isinstance(part, RetryPromptPart) and 'Timed out' in str(part.content) + ] + assert len(retry_parts) == 1 + # Check message contains timeout value (tool_name is in the part, not in content) + assert '0.1' in retry_parts[0].content + assert retry_parts[0].tool_name == 'my_slow_tool' + + +def test_tool_timeout_definition(): + """Test that timeout is properly set on ToolDefinition.""" + agent = Agent(TestModel()) + + @agent.tool_plain(timeout=30.0) + def tool_with_timeout() -> str: + return 'done' # pragma: no cover + + # Get tool definition through the toolset + tool = agent._function_toolset.tools['tool_with_timeout'] + assert tool.timeout == 30.0 + assert tool.tool_def.timeout == 30.0 + + +def test_tool_timeout_default_none(): + """Test that timeout defaults to None when not specified.""" + agent = Agent(TestModel()) + + @agent.tool_plain + def tool_without_timeout() -> str: + return 'done' # pragma: no cover + + tool = 
agent._function_toolset.tools['tool_without_timeout'] + assert tool.timeout is None + assert tool.tool_def.timeout is None + + +@pytest.mark.anyio +async def test_tool_timeout_exceeds_retry_limit(): + """Test that UnexpectedModelBehavior is raised when timeout exceeds retry limit.""" + import asyncio + + from pydantic_ai.exceptions import UnexpectedModelBehavior + from pydantic_ai.messages import ModelMessage, ModelResponse, ToolCallPart + from pydantic_ai.models.function import AgentInfo, FunctionModel + + async def model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + # Always try to call the slow tool + return ModelResponse(parts=[ToolCallPart(tool_name='always_slow_tool', args={}, tool_call_id='call-1')]) + + agent = Agent(FunctionModel(model_logic), retries=1) # Only 1 retry allowed + + @agent.tool_plain(timeout=0.05) + async def always_slow_tool() -> str: + await asyncio.sleep(1.0) # Always timeout + return 'done' # pragma: no cover + + with pytest.raises(UnexpectedModelBehavior, match='exceeded max retries'): + await agent.run('call always_slow_tool') + + +@pytest.mark.anyio +async def test_agent_level_tool_timeout(): + """Test that agent-level tool_timeout applies to all tools.""" + import asyncio + + call_count = 0 + + async def model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + nonlocal call_count + call_count += 1 + if call_count == 1: + return ModelResponse(parts=[ToolCallPart(tool_name='slow_tool', args={}, tool_call_id='call-1')]) + return ModelResponse(parts=[TextPart(content='done')]) + + # Set global tool_timeout on Agent + agent = Agent(FunctionModel(model_logic), tool_timeout=0.1) + + @agent.tool_plain + async def slow_tool() -> str: + await asyncio.sleep(1.0) # 1 second, but agent timeout is 0.1s + return 'done' # pragma: no cover + + result = await agent.run('call slow_tool') + + # Check that retry prompt was sent + retry_parts = [ + part + for msg in result.all_messages() + if isinstance(msg, ModelRequest) + for part in msg.parts + if isinstance(part, RetryPromptPart) and 'Timed out' in str(part.content) + ] + assert len(retry_parts) == 1 + assert 'Timed out after 0.1 seconds' in retry_parts[0].content + + +@pytest.mark.anyio +async def test_per_tool_timeout_overrides_agent_timeout(): + """Test that per-tool timeout overrides agent-level timeout.""" + import asyncio + + call_count = 0 + + async def model_logic(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse: + nonlocal call_count + call_count += 1 + if call_count == 1: + return ModelResponse(parts=[ToolCallPart(tool_name='fast_timeout_tool', args={}, tool_call_id='call-1')]) + return ModelResponse(parts=[TextPart(content='done')]) + + # Agent has generous 10s timeout, but per-tool timeout is only 0.1s + agent = Agent(FunctionModel(model_logic), tool_timeout=10.0) + + @agent.tool_plain(timeout=0.1) # Per-tool timeout overrides agent timeout + async def fast_timeout_tool() -> str: + await asyncio.sleep(1.0) # 1 second, per-tool timeout is 0.1s + return 'done' # pragma: no cover + + result = await agent.run('call fast_timeout_tool') + + # Should timeout because per-tool timeout (0.1s) is applied, not agent timeout (10s) + retry_parts = [ + part + for msg in result.all_messages() + if isinstance(msg, ModelRequest) + for part in msg.parts + if isinstance(part, RetryPromptPart) and 'Timed out' in str(part.content) + ] + assert len(retry_parts) == 1 + assert 'Timed out after 0.1 seconds' in retry_parts[0].content + + +def 
test_agent_tool_timeout_passed_to_toolset(): + """Test that agent-level tool_timeout is passed to FunctionToolset as timeout.""" + agent = Agent(TestModel(), tool_timeout=30.0) + + # The agent's tool_timeout should be passed to the toolset as timeout + assert agent._function_toolset.timeout == 30.0 From 77f88a875525bf62d3de31366493e8c959776183 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Fri, 12 Dec 2025 16:37:44 -0700 Subject: [PATCH 8/9] Fix `UIAdapter.dispatch_request` typing (#3721) --- pydantic_ai_slim/pydantic_ai/ui/_adapter.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py index 8cad4aeffb..fd70a55824 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py @@ -13,6 +13,7 @@ Generic, Protocol, TypeVar, + cast, runtime_checkable, ) @@ -43,7 +44,6 @@ 'StateDeps', ] - RunInputT = TypeVar('RunInputT') """Type variable for protocol-specific run input types.""" @@ -53,10 +53,12 @@ EventT = TypeVar('EventT') """Type variable for protocol-specific event types.""" - StateT = TypeVar('StateT', bound=BaseModel) """Type variable for the state type, which must be a subclass of `BaseModel`.""" +DispatchDepsT = TypeVar('DispatchDepsT') +"""TypeVar for deps to avoid awkwardness with unbound classvar deps.""" + @runtime_checkable class StateHandler(Protocol): @@ -328,18 +330,18 @@ async def dispatch_request( cls, request: Request, *, - agent: AbstractAgent[AgentDepsT, OutputDataT], + agent: AbstractAgent[DispatchDepsT, OutputDataT], message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, - instructions: Instructions[AgentDepsT] = None, - deps: AgentDepsT = None, + instructions: Instructions[DispatchDepsT] = None, + deps: DispatchDepsT = None, output_type: OutputSpec[Any] | None = None, model_settings: ModelSettings | None = None, usage_limits: UsageLimits | None = None, usage: RunUsage | None = None, infer_name: bool = True, - toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + toolsets: Sequence[AbstractToolset[DispatchDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, on_complete: OnCompleteFunc[EventT] | None = None, ) -> Response: @@ -376,7 +378,11 @@ async def dispatch_request( ) from e try: - adapter = await cls.from_request(request, agent=agent) + # The DepsT comes from `agent`, not from `cls`; the cast is necessary to explain this to pyright + adapter = cast( + UIAdapter[RunInputT, MessageT, EventT, DispatchDepsT, OutputDataT], + await cls.from_request(request, agent=agent), + ) except ValidationError as e: # pragma: no cover return Response( content=e.json(), From 11341e83852ce682e7057d742d79ecf4d4d087bd Mon Sep 17 00:00:00 2001 From: Mottakin Date: Sat, 13 Dec 2025 14:21:20 +0100 Subject: [PATCH 9/9] Update doc and s3 URL handling based on review comments --- docs/input.md | 2 +- .../pydantic_ai/models/bedrock.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/docs/input.md b/docs/input.md index 77081b9509..9f2272068c 100644 --- a/docs/input.md +++ b/docs/input.md @@ -139,4 +139,4 @@ result = agent.run_sync( print(result.output) ``` -For `BedrockConverseModel`, you can upload a file to S3 and pass the URL to the API directly. Pyadantic AI will pass it to `bedrock` as is. 
It expects the URL with the format: `s3://<bucket>/<key>`. An optional `bucketOwner` query parameter can be added to the URL to specify the owner of the bucket. For example: `s3://my-bucket/my-file.png?bucketOwner=123456789012`.
+`BedrockConverseModel` supports `s3://<bucket>/<key>` URIs, provided that the assumed role has the `s3:GetObject` permission. A `bucketOwner` query parameter must be specified if the bucket is not owned by the account making the request. For example: `s3://my-bucket/my-file.png?bucketOwner=123456789012`.
diff --git a/pydantic_ai_slim/pydantic_ai/models/bedrock.py b/pydantic_ai_slim/pydantic_ai/models/bedrock.py
index 7e64ffa2e5..5108c5c66f 100644
--- a/pydantic_ai_slim/pydantic_ai/models/bedrock.py
+++ b/pydantic_ai_slim/pydantic_ai/models/bedrock.py
@@ -8,10 +8,10 @@
 from datetime import datetime
 from itertools import count
 from typing import TYPE_CHECKING, Any, Generic, Literal, cast, overload
+from urllib.parse import parse_qs, urlparse

 import anyio.to_thread
 from botocore.exceptions import ClientError
-from mypy_boto3_bedrock_runtime.type_defs import DocumentSourceTypeDef
 from typing_extensions import ParamSpec, assert_never

 from pydantic_ai import (
@@ -63,6 +63,7 @@
     ConverseStreamResponseTypeDef,
     CountTokensRequestTypeDef,
     DocumentBlockTypeDef,
+    DocumentSourceTypeDef,
     GuardrailConfigurationTypeDef,
     ImageBlockTypeDef,
     InferenceConfigurationTypeDef,
@@ -70,6 +71,7 @@
     PerformanceConfigurationTypeDef,
     PromptVariableValuesTypeDef,
     ReasoningContentBlockOutputTypeDef,
+    S3LocationTypeDef,
     SystemContentBlockTypeDef,
     ToolChoiceTypeDef,
     ToolConfigurationTypeDef,
@@ -734,11 +736,12 @@ async def _map_user_prompt(  # noqa: C901
             else:
                 raise NotImplementedError('Binary content is not supported yet.')
         elif isinstance(item, ImageUrl | DocumentUrl | VideoUrl):
-            source: dict[str, Any]
+            source: DocumentSourceTypeDef
             if item.url.startswith('s3://'):
-                s3_location: dict[str, str] = {'uri': item.url.split('?')[0]}
-                if '?bucketOwner=' in item.url:
-                    s3_location['bucketOwner'] = item.url.split('?bucketOwner=')[1]
+                parsed = urlparse(item.url)
+                s3_location: S3LocationTypeDef = {'uri': f'{parsed.scheme}://{parsed.netloc}{parsed.path}'}
+                if bucket_owner := parse_qs(parsed.query).get('bucketOwner', [None])[0]:
+                    s3_location['bucketOwner'] = bucket_owner
                 source = {'s3Location': s3_location}
             else:
                 downloaded_item = await download_item(item, data_format='bytes', type_format='extension')
@@ -747,7 +750,7 @@ async def _map_user_prompt(  # noqa: C901
             if item.kind == 'image-url':
                 format = item.media_type.split('/')[1]
                 assert format in ('jpeg', 'png', 'gif', 'webp'), f'Unsupported image format: {format}'
-                image: ImageBlockTypeDef = {'format': format, 'source': cast(DocumentSourceTypeDef, source)}
+                image: ImageBlockTypeDef = {'format': format, 'source': source}
                 content.append({'image': image})

             elif item.kind == 'document-url':
@@ -755,7 +758,7 @@ async def _map_user_prompt(  # noqa: C901
                 document: DocumentBlockTypeDef = {
                     'name': name,
                     'format': item.format,
-                    'source': cast(DocumentSourceTypeDef, source),
+                    'source': source,
                 }
                 content.append({'document': document})

@@ -772,7 +775,7 @@ async def _map_user_prompt(  # noqa: C901
                     'wmv',
                     'three_gp',
                 ), f'Unsupported video format: {format}'
-                video: VideoBlockTypeDef = {'format': format, 'source': cast(DocumentSourceTypeDef, source)}
+                video: VideoBlockTypeDef = {'format': format, 'source': source}
                 content.append({'video': video})
         elif isinstance(item, AudioUrl):  # pragma: no cover
             raise NotImplementedError('Audio is not supported yet.')
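A minimal usage sketch of the S3 passthrough documented above — assuming a hypothetical `my-bucket` object that the caller's AWS credentials can read via `s3:GetObject`; the model ID mirrors the one used in the test cassettes:

```python
from pydantic_ai import Agent, ImageUrl
from pydantic_ai.models.bedrock import BedrockConverseModel

# Hypothetical bucket and key; the s3:// URL is passed through to the Converse
# API as an s3Location rather than being downloaded locally.
agent = Agent(BedrockConverseModel('us.amazon.nova-pro-v1:0'))

result = agent.run_sync(
    [
        'What is in this image?',
        ImageUrl(url='s3://my-bucket/images/test-image.jpg'),
        # For a bucket owned by another account, append the query parameter:
        # ImageUrl(url='s3://my-bucket/images/test-image.jpg?bucketOwner=123456789012')
    ]
)
print(result.output)
```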