 import pytest
 from unittest.mock import MagicMock, patch
 import os
+import json

+import sentry_sdk
+from sentry_sdk import start_span
+from sentry_sdk.consts import SPANDATA
 from sentry_sdk.integrations.openai_agents import OpenAIAgentsIntegration
-from sentry_sdk.integrations.openai_agents.utils import safe_serialize
+from sentry_sdk.integrations.openai_agents.utils import _set_input_data, safe_serialize
 from sentry_sdk.utils import parse_version

 import agents
@@ -1225,3 +1229,46 @@ def failing_tool(message: str) -> str:
     # The span should be marked as error because the tool execution failed
     assert execute_tool_span["status"] == "internal_error"
     assert execute_tool_span["tags"]["status"] == "internal_error"
+
+
+def test_openai_agents_message_truncation(sentry_init, capture_events):
+    """Test that large messages are truncated properly in the OpenAI Agents integration."""
+
+    large_content = (
+        "This is a very long message that will exceed our size limits. " * 1000
+    )
+
+    sentry_init(
+        integrations=[OpenAIAgentsIntegration()],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+
+    test_messages = [
+        {"role": "system", "content": "small message 1"},
+        {"role": "user", "content": large_content},
+        {"role": "assistant", "content": large_content},
+        {"role": "user", "content": "small message 4"},
+        {"role": "assistant", "content": "small message 5"},
+    ]
+
+    get_response_kwargs = {"input": test_messages}
+
+    with start_span(op="gen_ai.chat") as span:
+        scope = sentry_sdk.get_current_scope()
+        _set_input_data(span, get_response_kwargs)
+        if hasattr(scope, "_gen_ai_original_message_count"):
+            truncated_count = scope._gen_ai_original_message_count.get(span.span_id)
+            assert truncated_count == 5, (
+                f"Expected 5 original messages, got {truncated_count}"
+            )
+
+        assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span._data
+        messages_data = span._data[SPANDATA.GEN_AI_REQUEST_MESSAGES]
+        assert isinstance(messages_data, str)
+
+        parsed_messages = json.loads(messages_data)
+        assert isinstance(parsed_messages, list)
+        assert len(parsed_messages) == 2
+        assert "small message 4" in str(parsed_messages[0])
+        assert "small message 5" in str(parsed_messages[1])
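For context, the behavior this test pins down is: when the serialized chat history would be too large, only the most recent messages that still fit are kept on the span, and the original message count is recorded on the scope. Below is a rough, self-contained sketch of that keep-newest, size-capped truncation policy; the MAX_SERIALIZED_BYTES limit and the truncate_messages helper are hypothetical illustrations for this sketch, not the SDK's actual constants or API.

import json

# Hypothetical size limit for illustration only; the SDK's real limit and policy may differ.
MAX_SERIALIZED_BYTES = 20_000


def truncate_messages(messages, max_bytes=MAX_SERIALIZED_BYTES):
    """Keep the newest messages whose JSON serialization fits within max_bytes.

    Returns (kept_messages, original_count), mirroring how the test reads the
    pre-truncation count back from scope._gen_ai_original_message_count.
    """
    original_count = len(messages)
    kept = []
    for message in reversed(messages):
        candidate = [message] + kept
        if len(json.dumps(candidate).encode("utf-8")) > max_bytes:
            break
        kept = candidate
    return kept, original_count


# Two oversized messages in the middle push everything before them out,
# leaving only the trailing small messages: the same shape the test asserts.
messages = [
    {"role": "system", "content": "small message 1"},
    {"role": "user", "content": "x" * 64_000},
    {"role": "assistant", "content": "x" * 64_000},
    {"role": "user", "content": "small message 4"},
    {"role": "assistant", "content": "small message 5"},
]
kept, total = truncate_messages(messages)
assert total == 5 and len(kept) == 2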