From 7722df2562e780c1166397c9beb10911b1c7a19c Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Mon, 18 Aug 2025 12:14:08 +0200 Subject: [PATCH 1/7] feat(langchain): support async invoke & stream in langchain integration --- sentry_sdk/integrations/langchain.py | 138 ++++++++ .../integrations/langchain/test_langchain.py | 331 ++++++++++++++++++ 2 files changed, 469 insertions(+) diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index 7e04a740ed..36b3eef71c 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -78,6 +78,9 @@ def setup_once(): AgentExecutor.invoke = _wrap_agent_executor_invoke(AgentExecutor.invoke) AgentExecutor.stream = _wrap_agent_executor_stream(AgentExecutor.stream) + AgentExecutor.ainvoke = _wrap_agent_executor_ainvoke(AgentExecutor.ainvoke) + AgentExecutor.astream = _wrap_agent_executor_astream(AgentExecutor.astream) + class WatchedSpan: span = None # type: Span @@ -768,3 +771,138 @@ async def new_iterator_async(): return result return new_stream + + +def _wrap_agent_executor_ainvoke(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + + @wraps(f) + async def new_ainvoke(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(LangchainIntegration) + if integration is None: + return await f(self, *args, **kwargs) + + agent_name, tools = _get_request_data(self, args, kwargs) + + with sentry_sdk.start_span( + op=OP.GEN_AI_INVOKE_AGENT, + name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent", + origin=LangchainIntegration.origin, + ) as span: + if agent_name: + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name) + + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False) + + if tools: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False + ) + + # Run the agent + result = await f(self, *args, **kwargs) + + input = result.get("input") + if ( + input is not None + and should_send_default_pii() + and integration.include_prompts + ): + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + [ + input, + ], + ) + + output = result.get("output") + if ( + output is not None + and should_send_default_pii() + and integration.include_prompts + ): + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output) + + return result + + new_ainvoke.__wrapped__ = True + return new_ainvoke + + +def _wrap_agent_executor_astream(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + + @wraps(f) + def new_astream(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(LangchainIntegration) + if integration is None: + return f(self, *args, **kwargs) + + agent_name, tools = _get_request_data(self, args, kwargs) + + span = sentry_sdk.start_span( + op=OP.GEN_AI_INVOKE_AGENT, + name=f"invoke_agent {agent_name}".strip(), + origin=LangchainIntegration.origin, + ) + span.__enter__() + + if agent_name: + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name) + + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + if tools: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False + ) + + input = args[0].get("input") if len(args) >= 1 else None + if ( + input is not None + and should_send_default_pii() + and integration.include_prompts + ): + set_data_normalized( + span, + 
SPANDATA.GEN_AI_REQUEST_MESSAGES, + [ + input, + ], + ) + + # Run the agent - this returns an async iterator + result = f(self, *args, **kwargs) + + old_iterator = result + + async def new_iterator_async(): + # type: () -> AsyncIterator[Any] + event = None + try: + async for event in old_iterator: + yield event + finally: + try: + output = event.get("output") if event else None + except Exception: + output = None + + if ( + output is not None + and should_send_default_pii() + and integration.include_prompts + ): + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output) + + span.__exit__(None, None, None) + + return new_iterator_async() + + new_astream.__wrapped__ = True + return new_astream diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index 9a06ac05d4..d37990ecdc 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -589,3 +589,334 @@ def test_langchain_callback_list_existing_callback(sentry_init): [handler] = passed_callbacks assert handler is sentry_callback + + +@pytest.mark.asyncio +@pytest.mark.xfail +@pytest.mark.parametrize( + "send_default_pii, include_prompts, use_unknown_llm_type", + [ + (True, True, False), + (True, False, False), + (False, True, False), + (False, False, True), + ], +) +async def test_langchain_agent_ainvoke( + sentry_init, capture_events, send_default_pii, include_prompts, use_unknown_llm_type +): + global llm_type + llm_type = "acme-llm" if use_unknown_llm_type else "openai-chat" + + sentry_init( + integrations=[ + LangchainIntegration( + include_prompts=include_prompts, + ) + ], + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + ) + events = capture_events() + + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are very powerful assistant, but don't know current events", + ), + ("user", "{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + global stream_result_mock + stream_result_mock = Mock( + return_value=[ + ChatGenerationChunk( + type="ChatGenerationChunk", + message=AIMessageChunk( + content="The word eudca has 5 letters.", + usage_metadata={ + "input_tokens": 89, + "output_tokens": 28, + "total_tokens": 117, + "input_token_details": {"audio": 0, "cache_read": 0}, + "output_token_details": {"audio": 0, "reasoning": 0}, + }, + ), + ), + ] + ) + + class AsyncMockOpenAI(MockOpenAI): + async def _agenerate(self, messages, stop=None, run_manager=None, **kwargs): + from langchain_core.outputs import ChatResult, ChatGeneration + + return ChatResult( + generations=[ + [ + ChatGeneration( + message=AIMessageChunk( + content="The word eudca has 5 letters.", + usage_metadata={ + "input_tokens": 89, + "output_tokens": 28, + "total_tokens": 117, + "input_token_details": { + "audio": 0, + "cache_read": 0, + }, + "output_token_details": { + "audio": 0, + "reasoning": 0, + }, + }, + ) + ) + ] + ] + ) + + llm = AsyncMockOpenAI( + model_name="gpt-3.5-turbo", + temperature=0, + openai_api_key="badkey", + ) + agent = create_openai_tools_agent(llm, [get_word_length], prompt) + + agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True) + + with start_transaction(): + result = await agent_executor.ainvoke( + {"input": "How many letters in the word eudca"} + ) + + assert result is not None + tx = events[0] + assert tx["type"] == "transaction" + + # Find the agent invoke span + agent_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.invoke_agent") + assert 
len(agent_spans) == 1 + + agent_span = agent_spans[0] + assert agent_span["data"]["gen_ai.operation_name"] == "invoke_agent" + assert not agent_span["data"]["gen_ai.response_streaming"] + + if send_default_pii and include_prompts: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in agent_span["data"] + assert agent_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == [ + "How many letters in the word eudca" + ] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in agent_span["data"] + else: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in agent_span.get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in agent_span.get("data", {}) + + +@pytest.mark.asyncio +@pytest.mark.xfail +@pytest.mark.parametrize( + "send_default_pii, include_prompts, use_unknown_llm_type", + [ + (True, True, False), + (True, False, False), + (False, True, False), + (False, False, True), + ], +) +async def test_langchain_agent_astream( + sentry_init, capture_events, send_default_pii, include_prompts, use_unknown_llm_type +): + global llm_type + llm_type = "acme-llm" if use_unknown_llm_type else "openai-chat" + + sentry_init( + integrations=[ + LangchainIntegration( + include_prompts=include_prompts, + ) + ], + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + ) + events = capture_events() + + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are very powerful assistant, but don't know current events", + ), + ("user", "{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + global stream_result_mock + stream_result_mock = Mock( + side_effect=[ + [ + ChatGenerationChunk( + type="ChatGenerationChunk", + message=AIMessageChunk( + content="", + additional_kwargs={ + "tool_calls": [ + { + "index": 0, + "id": "call_BbeyNhCKa6kYLYzrD40NGm3b", + "function": { + "arguments": '{"word": "eudca"}', + "name": "get_word_length", + }, + "type": "function", + } + ] + }, + ), + ), + ], + [ + ChatGenerationChunk( + text="The word eudca has 5 letters.", + type="ChatGenerationChunk", + message=AIMessageChunk( + content="The word eudca has 5 letters.", + usage_metadata={ + "input_tokens": 89, + "output_tokens": 28, + "total_tokens": 117, + "input_token_details": {"audio": 0, "cache_read": 0}, + "output_token_details": {"audio": 0, "reasoning": 0}, + }, + ), + ), + ], + ] + ) + + class AsyncMockOpenAI(MockOpenAI): + async def _astream(self, messages, stop=None, run_manager=None, **kwargs): + for x in stream_result_mock(): + yield x + + llm = AsyncMockOpenAI( + model_name="gpt-3.5-turbo", + temperature=0, + openai_api_key="badkey", + ) + agent = create_openai_tools_agent(llm, [get_word_length], prompt) + + agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True) + + with start_transaction(): + events_collected = [] + async for event in agent_executor.astream( + {"input": "How many letters in the word eudca"} + ): + events_collected.append(event) + + assert len(events_collected) > 0 + tx = events[0] + assert tx["type"] == "transaction" + + # Find the agent invoke span + agent_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.invoke_agent") + assert len(agent_spans) == 1 + + agent_span = agent_spans[0] + assert agent_span["data"]["gen_ai.operation_name"] == "invoke_agent" + assert agent_span["data"]["gen_ai.response_streaming"] + + if send_default_pii and include_prompts: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in agent_span["data"] + assert agent_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == [ + "How many letters in the word eudca" + ] + assert 
SPANDATA.GEN_AI_RESPONSE_TEXT in agent_span["data"] + else: + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in agent_span.get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in agent_span.get("data", {}) + + +@pytest.mark.asyncio +async def test_langchain_ainvoke_error(sentry_init, capture_events): + sentry_init( + integrations=[LangchainIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are very powerful assistant, but don't know current events", + ), + ("user", "{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + + class AsyncMockOpenAI(MockOpenAI): + async def _agenerate(self, messages, stop=None, run_manager=None, **kwargs): + raise Exception("API rate limit error") + + llm = AsyncMockOpenAI( + model_name="gpt-3.5-turbo", + temperature=0, + openai_api_key="badkey", + ) + agent = create_openai_tools_agent(llm, [get_word_length], prompt) + + agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True) + + with start_transaction(), pytest.raises(Exception): + await agent_executor.ainvoke({"input": "How many letters in the word eudca"}) + + # Should have captured the error + assert len(events) > 0 + + +@pytest.mark.asyncio +async def test_langchain_astream_error(sentry_init, capture_events): + sentry_init( + integrations=[LangchainIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are very powerful assistant, but don't know current events", + ), + ("user", "{input}"), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + ) + + class AsyncMockOpenAI(MockOpenAI): + async def _astream(self, messages, stop=None, run_manager=None, **kwargs): + raise Exception("API rate limit error") + + llm = AsyncMockOpenAI( + model_name="gpt-3.5-turbo", + temperature=0, + openai_api_key="badkey", + ) + agent = create_openai_tools_agent(llm, [get_word_length], prompt) + + agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True) + + with start_transaction(), pytest.raises(Exception): + async for event in agent_executor.astream( + {"input": "How many letters in the word eudca"} + ): + pass # Should error before yielding anything + + # Should have captured the error + assert len(events) > 0 From 7f67f64a5c61558d1dd286d00499b522c10bae02 Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Mon, 25 Aug 2025 10:53:28 +0200 Subject: [PATCH 2/7] add pytest-asyncio to tox.jinja --- scripts/populate_tox/tox.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index 4c3b86af81..d46015d821 100644 --- a/scripts/populate_tox/tox.jinja +++ b/scripts/populate_tox/tox.jinja @@ -239,6 +239,7 @@ deps = httpx-latest: httpx # Langchain + langchain: pytest-asyncio langchain-v0.1: openai~=1.0.0 langchain-v0.1: langchain~=0.1.11 langchain-v0.1: tiktoken~=0.6.0 From 46eebac6090d67e849cfe8218783eaa13dd81a64 Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Mon, 25 Aug 2025 14:05:30 +0200 Subject: [PATCH 3/7] add pytest-asyncio to tox.jinja --- scripts/populate_tox/tox.jinja | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index d46015d821..8a7a0fbb82 100644 --- a/scripts/populate_tox/tox.jinja +++ 
b/scripts/populate_tox/tox.jinja
@@ -239,7 +239,7 @@ deps =
     httpx-latest: httpx
 
     # Langchain
-    langchain: pytest-asyncio
+    langchain-v0.1: pytest-asyncio
     langchain-v0.1: openai~=1.0.0
     langchain-v0.1: langchain~=0.1.11
     langchain-v0.1: tiktoken~=0.6.0
@@ -248,6 +248,7 @@ deps =
     langchain-v0.3: langchain-community
     langchain-v0.3: tiktoken
     langchain-v0.3: openai
+    langchain-v0.3: pytest-asyncio
     langchain-{latest,notiktoken}: langchain
     langchain-{latest,notiktoken}: langchain-openai
     langchain-{latest,notiktoken}: openai>=1.6.1

From 80fed1479ccc8b9af018352da91cb769b38119aa Mon Sep 17 00:00:00 2001
From: Simon Hellmayr
Date: Mon, 25 Aug 2025 14:06:00 +0200
Subject: [PATCH 4/7] add pytest-asyncio to tox.jinja

---
 scripts/populate_tox/tox.jinja | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja
index 8a7a0fbb82..bf576e63c2 100644
--- a/scripts/populate_tox/tox.jinja
+++ b/scripts/populate_tox/tox.jinja
@@ -252,7 +252,9 @@ deps =
     langchain-{latest,notiktoken}: langchain
     langchain-{latest,notiktoken}: langchain-openai
     langchain-{latest,notiktoken}: openai>=1.6.1
+    langchain-{latest,notiktoken}: pytest-asyncio
     langchain-latest: tiktoken~=0.6.0
+    langchain-latest: pytest-asyncio
 
     # OpenAI
     openai: pytest-asyncio

From 260ef0d7b1cff1f72723d53d2291f304f5af9b7a Mon Sep 17 00:00:00 2001
From: Simon Hellmayr
Date: Mon, 25 Aug 2025 14:08:57 +0200
Subject: [PATCH 5/7] update tox.ini

---
 tox.ini | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tox.ini b/tox.ini
index bbc1d57c12..301e356b4d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -408,6 +408,7 @@ deps =
     httpx-latest: httpx
 
     # Langchain
+    langchain-v0.1: pytest-asyncio
     langchain-v0.1: openai~=1.0.0
     langchain-v0.1: langchain~=0.1.11
     langchain-v0.1: tiktoken~=0.6.0
@@ -416,10 +417,13 @@ deps =
     langchain-v0.3: langchain-community
     langchain-v0.3: tiktoken
     langchain-v0.3: openai
+    langchain-v0.3: pytest-asyncio
     langchain-{latest,notiktoken}: langchain
     langchain-{latest,notiktoken}: langchain-openai
     langchain-{latest,notiktoken}: openai>=1.6.1
+    langchain-{latest,notiktoken}: pytest-asyncio
     langchain-latest: tiktoken~=0.6.0
+    langchain-latest: pytest-asyncio
 
     # OpenAI
     openai: pytest-asyncio

From 79554000645bdef32cca149c5d0ef1832ed91e8f Mon Sep 17 00:00:00 2001
From: Simon Hellmayr
Date: Tue, 26 Aug 2025 09:03:03 +0200
Subject: [PATCH 6/7] hardcode pytest-asyncio dep

---
 scripts/populate_tox/tox.jinja |  5 +--
 tox.ini                        | 75 ++++++++++++++++------------------
 2 files changed, 36 insertions(+), 44 deletions(-)

diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja
index bf576e63c2..d46015d821 100644
--- a/scripts/populate_tox/tox.jinja
+++ b/scripts/populate_tox/tox.jinja
@@ -239,7 +239,7 @@ deps =
     httpx-latest: httpx
 
     # Langchain
-    langchain-v0.1: pytest-asyncio
+    langchain: pytest-asyncio
     langchain-v0.1: openai~=1.0.0
     langchain-v0.1: langchain~=0.1.11
     langchain-v0.1: tiktoken~=0.6.0
@@ -248,13 +248,10 @@ deps =
     langchain-v0.3: langchain-community
     langchain-v0.3: tiktoken
     langchain-v0.3: openai
-    langchain-v0.3: pytest-asyncio
     langchain-{latest,notiktoken}: langchain
     langchain-{latest,notiktoken}: langchain-openai
     langchain-{latest,notiktoken}: openai>=1.6.1
-    langchain-{latest,notiktoken}: pytest-asyncio
     langchain-latest: tiktoken~=0.6.0
-    langchain-latest: pytest-asyncio
 
     # OpenAI
     openai: pytest-asyncio

diff --git a/tox.ini b/tox.ini
index 301e356b4d..b355058724 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,6 +10,7 @@
 # The file (and all resulting CI YAMLs) then need to be regenerated via
 # 
"scripts/generate-test-files.sh". # + # Last generated: 2025-08-26T08:59:42.512502+00:00 [tox] @@ -136,18 +137,18 @@ envlist = # ~~~ AI ~~~ {py3.8,py3.11,py3.12}-anthropic-v0.16.0 - {py3.8,py3.11,py3.12}-anthropic-v0.32.0 - {py3.8,py3.11,py3.12}-anthropic-v0.48.0 - {py3.8,py3.12,py3.13}-anthropic-v0.64.0 + {py3.8,py3.11,py3.12}-anthropic-v0.31.2 + {py3.8,py3.11,py3.12}-anthropic-v0.46.0 + {py3.8,py3.12,py3.13}-anthropic-v0.62.0 {py3.9,py3.10,py3.11}-cohere-v5.4.0 {py3.9,py3.11,py3.12}-cohere-v5.9.4 {py3.9,py3.11,py3.12}-cohere-v5.13.12 - {py3.9,py3.11,py3.12}-cohere-v5.17.0 + {py3.9,py3.11,py3.12}-cohere-v5.16.3 {py3.10,py3.11,py3.12}-openai_agents-v0.0.19 {py3.10,py3.12,py3.13}-openai_agents-v0.1.0 - {py3.10,py3.12,py3.13}-openai_agents-v0.2.9 + {py3.10,py3.12,py3.13}-openai_agents-v0.2.6 {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 @@ -162,7 +163,7 @@ envlist = {py3.6}-pymongo-v3.5.1 {py3.6,py3.10,py3.11}-pymongo-v3.13.0 {py3.6,py3.9,py3.10}-pymongo-v4.0.2 - {py3.9,py3.12,py3.13}-pymongo-v4.14.1 + {py3.9,py3.12,py3.13}-pymongo-v4.14.0 {py3.6}-redis_py_cluster_legacy-v1.3.6 {py3.6,py3.7}-redis_py_cluster_legacy-v2.0.0 @@ -183,9 +184,9 @@ envlist = {py3.9,py3.12,py3.13}-openfeature-v0.8.2 {py3.7,py3.12,py3.13}-statsig-v0.55.3 - {py3.7,py3.12,py3.13}-statsig-v0.58.4 + {py3.7,py3.12,py3.13}-statsig-v0.57.3 + {py3.7,py3.12,py3.13}-statsig-v0.59.1 {py3.7,py3.12,py3.13}-statsig-v0.61.0 - {py3.7,py3.12,py3.13}-statsig-v0.63.0 {py3.8,py3.12,py3.13}-unleash-v6.0.1 {py3.8,py3.12,py3.13}-unleash-v6.1.0 @@ -201,16 +202,15 @@ envlist = {py3.6,py3.9,py3.10}-gql-v3.4.1 {py3.7,py3.11,py3.12}-gql-v3.5.3 - {py3.9,py3.12,py3.13}-gql-v4.0.0 - {py3.9,py3.12,py3.13}-gql-v4.1.0b0 + {py3.9,py3.12,py3.13}-gql-v4.0.0b0 {py3.6,py3.9,py3.10}-graphene-v3.3 {py3.8,py3.12,py3.13}-graphene-v3.4.3 {py3.8,py3.10,py3.11}-strawberry-v0.209.8 - {py3.8,py3.11,py3.12}-strawberry-v0.233.3 - {py3.9,py3.12,py3.13}-strawberry-v0.257.0 - {py3.9,py3.12,py3.13}-strawberry-v0.280.0 + {py3.8,py3.11,py3.12}-strawberry-v0.232.2 + {py3.8,py3.12,py3.13}-strawberry-v0.255.0 + {py3.9,py3.12,py3.13}-strawberry-v0.278.1 # ~~~ Network ~~~ @@ -251,12 +251,12 @@ envlist = {py3.6,py3.7,py3.8}-flask-v1.1.4 {py3.8,py3.12,py3.13}-flask-v2.3.3 {py3.8,py3.12,py3.13}-flask-v3.0.3 - {py3.9,py3.12,py3.13}-flask-v3.1.2 + {py3.9,py3.12,py3.13}-flask-v3.1.1 {py3.6,py3.9,py3.10}-starlette-v0.16.0 {py3.7,py3.10,py3.11}-starlette-v0.26.1 {py3.8,py3.11,py3.12}-starlette-v0.36.3 - {py3.9,py3.12,py3.13}-starlette-v0.47.3 + {py3.9,py3.12,py3.13}-starlette-v0.47.2 {py3.6,py3.9,py3.10}-fastapi-v0.79.1 {py3.7,py3.10,py3.11}-fastapi-v0.91.0 @@ -310,7 +310,7 @@ envlist = {py3.9,py3.12,py3.13}-trytond-v7.6.5 {py3.7,py3.12,py3.13}-typer-v0.15.4 - {py3.7,py3.12,py3.13}-typer-v0.16.1 + {py3.7,py3.12,py3.13}-typer-v0.16.0 @@ -408,7 +408,6 @@ deps = httpx-latest: httpx # Langchain - langchain-v0.1: pytest-asyncio langchain-v0.1: openai~=1.0.0 langchain-v0.1: langchain~=0.1.11 langchain-v0.1: tiktoken~=0.6.0 @@ -417,13 +416,10 @@ deps = langchain-v0.3: langchain-community langchain-v0.3: tiktoken langchain-v0.3: openai - langchain-v0.3: pytest-asyncio langchain-{latest,notiktoken}: langchain langchain-{latest,notiktoken}: langchain-openai langchain-{latest,notiktoken}: openai>=1.6.1 - langchain-{latest,notiktoken}: pytest-asyncio langchain-latest: tiktoken~=0.6.0 - langchain-latest: pytest-asyncio # OpenAI openai: pytest-asyncio @@ -516,22 +512,22 @@ deps = # ~~~ AI ~~~ anthropic-v0.16.0: anthropic==0.16.0 - anthropic-v0.32.0: 
anthropic==0.32.0 - anthropic-v0.48.0: anthropic==0.48.0 - anthropic-v0.64.0: anthropic==0.64.0 + anthropic-v0.31.2: anthropic==0.31.2 + anthropic-v0.46.0: anthropic==0.46.0 + anthropic-v0.62.0: anthropic==0.62.0 anthropic: pytest-asyncio anthropic-v0.16.0: httpx<0.28.0 - anthropic-v0.32.0: httpx<0.28.0 - anthropic-v0.48.0: httpx<0.28.0 + anthropic-v0.31.2: httpx<0.28.0 + anthropic-v0.46.0: httpx<0.28.0 cohere-v5.4.0: cohere==5.4.0 cohere-v5.9.4: cohere==5.9.4 cohere-v5.13.12: cohere==5.13.12 - cohere-v5.17.0: cohere==5.17.0 + cohere-v5.16.3: cohere==5.16.3 openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 - openai_agents-v0.2.9: openai-agents==0.2.9 + openai_agents-v0.2.6: openai-agents==0.2.6 openai_agents: pytest-asyncio huggingface_hub-v0.22.2: huggingface_hub==0.22.2 @@ -547,7 +543,7 @@ deps = pymongo-v3.5.1: pymongo==3.5.1 pymongo-v3.13.0: pymongo==3.13.0 pymongo-v4.0.2: pymongo==4.0.2 - pymongo-v4.14.1: pymongo==4.14.1 + pymongo-v4.14.0: pymongo==4.14.0 pymongo: mockupdb redis_py_cluster_legacy-v1.3.6: redis-py-cluster==1.3.6 @@ -569,9 +565,9 @@ deps = openfeature-v0.8.2: openfeature-sdk==0.8.2 statsig-v0.55.3: statsig==0.55.3 - statsig-v0.58.4: statsig==0.58.4 + statsig-v0.57.3: statsig==0.57.3 + statsig-v0.59.1: statsig==0.59.1 statsig-v0.61.0: statsig==0.61.0 - statsig-v0.63.0: statsig==0.63.0 statsig: typing_extensions unleash-v6.0.1: UnleashClient==6.0.1 @@ -591,8 +587,7 @@ deps = gql-v3.4.1: gql[all]==3.4.1 gql-v3.5.3: gql[all]==3.5.3 - gql-v4.0.0: gql[all]==4.0.0 - gql-v4.1.0b0: gql[all]==4.1.0b0 + gql-v4.0.0b0: gql[all]==4.0.0b0 graphene-v3.3: graphene==3.3 graphene-v3.4.3: graphene==3.4.3 @@ -603,13 +598,13 @@ deps = py3.6-graphene: aiocontextvars strawberry-v0.209.8: strawberry-graphql[fastapi,flask]==0.209.8 - strawberry-v0.233.3: strawberry-graphql[fastapi,flask]==0.233.3 - strawberry-v0.257.0: strawberry-graphql[fastapi,flask]==0.257.0 - strawberry-v0.280.0: strawberry-graphql[fastapi,flask]==0.280.0 + strawberry-v0.232.2: strawberry-graphql[fastapi,flask]==0.232.2 + strawberry-v0.255.0: strawberry-graphql[fastapi,flask]==0.255.0 + strawberry-v0.278.1: strawberry-graphql[fastapi,flask]==0.278.1 strawberry: httpx strawberry-v0.209.8: pydantic<2.11 - strawberry-v0.233.3: pydantic<2.11 - strawberry-v0.257.0: pydantic<2.11 + strawberry-v0.232.2: pydantic<2.11 + strawberry-v0.255.0: pydantic<2.11 # ~~~ Network ~~~ @@ -679,7 +674,7 @@ deps = flask-v1.1.4: flask==1.1.4 flask-v2.3.3: flask==2.3.3 flask-v3.0.3: flask==3.0.3 - flask-v3.1.2: flask==3.1.2 + flask-v3.1.1: flask==3.1.1 flask: flask-login flask: werkzeug flask-v1.1.4: werkzeug<2.1.0 @@ -688,7 +683,7 @@ deps = starlette-v0.16.0: starlette==0.16.0 starlette-v0.26.1: starlette==0.26.1 starlette-v0.36.3: starlette==0.36.3 - starlette-v0.47.3: starlette==0.47.3 + starlette-v0.47.2: starlette==0.47.2 starlette: pytest-asyncio starlette: python-multipart starlette: requests @@ -785,7 +780,7 @@ deps = trytond-v4.8.18: werkzeug<1.0 typer-v0.15.4: typer==0.15.4 - typer-v0.16.1: typer==0.16.1 + typer-v0.16.0: typer==0.16.0 From 7fc3a235179fe0a29fda39e505732e74e8e2c5b7 Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Tue, 26 Aug 2025 11:22:17 +0200 Subject: [PATCH 7/7] fix tox.ini after rebase --- tox.ini | 72 +++++++++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/tox.ini b/tox.ini index b355058724..dba653b19b 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,6 @@ # The file (and all resulting CI YAMLs) then need to be 
regenerated via # "scripts/generate-test-files.sh". # - # Last generated: 2025-08-26T08:59:42.512502+00:00 [tox] @@ -137,18 +136,18 @@ envlist = # ~~~ AI ~~~ {py3.8,py3.11,py3.12}-anthropic-v0.16.0 - {py3.8,py3.11,py3.12}-anthropic-v0.31.2 - {py3.8,py3.11,py3.12}-anthropic-v0.46.0 - {py3.8,py3.12,py3.13}-anthropic-v0.62.0 + {py3.8,py3.11,py3.12}-anthropic-v0.32.0 + {py3.8,py3.11,py3.12}-anthropic-v0.48.0 + {py3.8,py3.12,py3.13}-anthropic-v0.64.0 {py3.9,py3.10,py3.11}-cohere-v5.4.0 {py3.9,py3.11,py3.12}-cohere-v5.9.4 {py3.9,py3.11,py3.12}-cohere-v5.13.12 - {py3.9,py3.11,py3.12}-cohere-v5.16.3 + {py3.9,py3.11,py3.12}-cohere-v5.17.0 {py3.10,py3.11,py3.12}-openai_agents-v0.0.19 {py3.10,py3.12,py3.13}-openai_agents-v0.1.0 - {py3.10,py3.12,py3.13}-openai_agents-v0.2.6 + {py3.10,py3.12,py3.13}-openai_agents-v0.2.9 {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 @@ -163,7 +162,7 @@ envlist = {py3.6}-pymongo-v3.5.1 {py3.6,py3.10,py3.11}-pymongo-v3.13.0 {py3.6,py3.9,py3.10}-pymongo-v4.0.2 - {py3.9,py3.12,py3.13}-pymongo-v4.14.0 + {py3.9,py3.12,py3.13}-pymongo-v4.14.1 {py3.6}-redis_py_cluster_legacy-v1.3.6 {py3.6,py3.7}-redis_py_cluster_legacy-v2.0.0 @@ -184,9 +183,9 @@ envlist = {py3.9,py3.12,py3.13}-openfeature-v0.8.2 {py3.7,py3.12,py3.13}-statsig-v0.55.3 - {py3.7,py3.12,py3.13}-statsig-v0.57.3 - {py3.7,py3.12,py3.13}-statsig-v0.59.1 + {py3.7,py3.12,py3.13}-statsig-v0.58.4 {py3.7,py3.12,py3.13}-statsig-v0.61.0 + {py3.7,py3.12,py3.13}-statsig-v0.63.0 {py3.8,py3.12,py3.13}-unleash-v6.0.1 {py3.8,py3.12,py3.13}-unleash-v6.1.0 @@ -202,15 +201,16 @@ envlist = {py3.6,py3.9,py3.10}-gql-v3.4.1 {py3.7,py3.11,py3.12}-gql-v3.5.3 - {py3.9,py3.12,py3.13}-gql-v4.0.0b0 + {py3.9,py3.12,py3.13}-gql-v4.0.0 + {py3.9,py3.12,py3.13}-gql-v4.1.0b0 {py3.6,py3.9,py3.10}-graphene-v3.3 {py3.8,py3.12,py3.13}-graphene-v3.4.3 {py3.8,py3.10,py3.11}-strawberry-v0.209.8 - {py3.8,py3.11,py3.12}-strawberry-v0.232.2 - {py3.8,py3.12,py3.13}-strawberry-v0.255.0 - {py3.9,py3.12,py3.13}-strawberry-v0.278.1 + {py3.8,py3.11,py3.12}-strawberry-v0.233.3 + {py3.9,py3.12,py3.13}-strawberry-v0.257.0 + {py3.9,py3.12,py3.13}-strawberry-v0.280.0 # ~~~ Network ~~~ @@ -251,12 +251,12 @@ envlist = {py3.6,py3.7,py3.8}-flask-v1.1.4 {py3.8,py3.12,py3.13}-flask-v2.3.3 {py3.8,py3.12,py3.13}-flask-v3.0.3 - {py3.9,py3.12,py3.13}-flask-v3.1.1 + {py3.9,py3.12,py3.13}-flask-v3.1.2 {py3.6,py3.9,py3.10}-starlette-v0.16.0 {py3.7,py3.10,py3.11}-starlette-v0.26.1 {py3.8,py3.11,py3.12}-starlette-v0.36.3 - {py3.9,py3.12,py3.13}-starlette-v0.47.2 + {py3.9,py3.12,py3.13}-starlette-v0.47.3 {py3.6,py3.9,py3.10}-fastapi-v0.79.1 {py3.7,py3.10,py3.11}-fastapi-v0.91.0 @@ -310,7 +310,7 @@ envlist = {py3.9,py3.12,py3.13}-trytond-v7.6.5 {py3.7,py3.12,py3.13}-typer-v0.15.4 - {py3.7,py3.12,py3.13}-typer-v0.16.0 + {py3.7,py3.12,py3.13}-typer-v0.16.1 @@ -408,6 +408,7 @@ deps = httpx-latest: httpx # Langchain + langchain: pytest-asyncio langchain-v0.1: openai~=1.0.0 langchain-v0.1: langchain~=0.1.11 langchain-v0.1: tiktoken~=0.6.0 @@ -512,22 +513,22 @@ deps = # ~~~ AI ~~~ anthropic-v0.16.0: anthropic==0.16.0 - anthropic-v0.31.2: anthropic==0.31.2 - anthropic-v0.46.0: anthropic==0.46.0 - anthropic-v0.62.0: anthropic==0.62.0 + anthropic-v0.32.0: anthropic==0.32.0 + anthropic-v0.48.0: anthropic==0.48.0 + anthropic-v0.64.0: anthropic==0.64.0 anthropic: pytest-asyncio anthropic-v0.16.0: httpx<0.28.0 - anthropic-v0.31.2: httpx<0.28.0 - anthropic-v0.46.0: httpx<0.28.0 + anthropic-v0.32.0: httpx<0.28.0 + anthropic-v0.48.0: httpx<0.28.0 
cohere-v5.4.0: cohere==5.4.0 cohere-v5.9.4: cohere==5.9.4 cohere-v5.13.12: cohere==5.13.12 - cohere-v5.16.3: cohere==5.16.3 + cohere-v5.17.0: cohere==5.17.0 openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 - openai_agents-v0.2.6: openai-agents==0.2.6 + openai_agents-v0.2.9: openai-agents==0.2.9 openai_agents: pytest-asyncio huggingface_hub-v0.22.2: huggingface_hub==0.22.2 @@ -543,7 +544,7 @@ deps = pymongo-v3.5.1: pymongo==3.5.1 pymongo-v3.13.0: pymongo==3.13.0 pymongo-v4.0.2: pymongo==4.0.2 - pymongo-v4.14.0: pymongo==4.14.0 + pymongo-v4.14.1: pymongo==4.14.1 pymongo: mockupdb redis_py_cluster_legacy-v1.3.6: redis-py-cluster==1.3.6 @@ -565,9 +566,9 @@ deps = openfeature-v0.8.2: openfeature-sdk==0.8.2 statsig-v0.55.3: statsig==0.55.3 - statsig-v0.57.3: statsig==0.57.3 - statsig-v0.59.1: statsig==0.59.1 + statsig-v0.58.4: statsig==0.58.4 statsig-v0.61.0: statsig==0.61.0 + statsig-v0.63.0: statsig==0.63.0 statsig: typing_extensions unleash-v6.0.1: UnleashClient==6.0.1 @@ -587,7 +588,8 @@ deps = gql-v3.4.1: gql[all]==3.4.1 gql-v3.5.3: gql[all]==3.5.3 - gql-v4.0.0b0: gql[all]==4.0.0b0 + gql-v4.0.0: gql[all]==4.0.0 + gql-v4.1.0b0: gql[all]==4.1.0b0 graphene-v3.3: graphene==3.3 graphene-v3.4.3: graphene==3.4.3 @@ -598,13 +600,13 @@ deps = py3.6-graphene: aiocontextvars strawberry-v0.209.8: strawberry-graphql[fastapi,flask]==0.209.8 - strawberry-v0.232.2: strawberry-graphql[fastapi,flask]==0.232.2 - strawberry-v0.255.0: strawberry-graphql[fastapi,flask]==0.255.0 - strawberry-v0.278.1: strawberry-graphql[fastapi,flask]==0.278.1 + strawberry-v0.233.3: strawberry-graphql[fastapi,flask]==0.233.3 + strawberry-v0.257.0: strawberry-graphql[fastapi,flask]==0.257.0 + strawberry-v0.280.0: strawberry-graphql[fastapi,flask]==0.280.0 strawberry: httpx strawberry-v0.209.8: pydantic<2.11 - strawberry-v0.232.2: pydantic<2.11 - strawberry-v0.255.0: pydantic<2.11 + strawberry-v0.233.3: pydantic<2.11 + strawberry-v0.257.0: pydantic<2.11 # ~~~ Network ~~~ @@ -674,7 +676,7 @@ deps = flask-v1.1.4: flask==1.1.4 flask-v2.3.3: flask==2.3.3 flask-v3.0.3: flask==3.0.3 - flask-v3.1.1: flask==3.1.1 + flask-v3.1.2: flask==3.1.2 flask: flask-login flask: werkzeug flask-v1.1.4: werkzeug<2.1.0 @@ -683,7 +685,7 @@ deps = starlette-v0.16.0: starlette==0.16.0 starlette-v0.26.1: starlette==0.26.1 starlette-v0.36.3: starlette==0.36.3 - starlette-v0.47.2: starlette==0.47.2 + starlette-v0.47.3: starlette==0.47.3 starlette: pytest-asyncio starlette: python-multipart starlette: requests @@ -780,7 +782,7 @@ deps = trytond-v4.8.18: werkzeug<1.0 typer-v0.15.4: typer==0.15.4 - typer-v0.16.0: typer==0.16.0 + typer-v0.16.1: typer==0.16.1
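
Usage note (not part of the patch series above): a minimal sketch of how the new
async agent paths added in PATCH 1/7 are exercised. It assumes an `llm`, `tools`,
and `prompt` constructed the same way as in the tests in that patch; the names,
the DSN placeholder, and the transaction name are illustrative only.

    import sentry_sdk
    from sentry_sdk.integrations.langchain import LangchainIntegration
    from langchain.agents import AgentExecutor, create_openai_tools_agent

    sentry_sdk.init(
        dsn="...",
        traces_sample_rate=1.0,
        # Prompts and outputs are only attached to spans when both
        # send_default_pii and include_prompts are enabled:
        send_default_pii=True,
        integrations=[LangchainIntegration(include_prompts=True)],
    )

    agent = create_openai_tools_agent(llm, tools, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tools)

    async def run():
        with sentry_sdk.start_transaction(name="agent-demo"):
            # Creates a gen_ai.invoke_agent span with
            # gen_ai.response_streaming=False.
            result = await agent_executor.ainvoke(
                {"input": "How many letters in the word eudca"}
            )

            # Creates a gen_ai.invoke_agent span with
            # gen_ai.response_streaming=True; the span is closed once the
            # async iterator is exhausted.
            async for event in agent_executor.astream(
                {"input": "How many letters in the word eudca"}
            ):
                ...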