From 96847b9f6a75daca8a9b8a0bd801c8230fa36375 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 30 Mar 2026 11:23:35 -0500 Subject: [PATCH 01/20] fix(langchain): wrap tools with StructuredTool before bind_tools Raw Python callables passed to bind_tools() lack the schema metadata required to generate a valid OpenAI tools[].function spec, causing a 400 "Missing required parameter: 'tools[0].function'" error. Wrapping each callable with StructuredTool.from_function(name=config_key) also ensures the model response carries the config key as the tool name, fixing a separate bug where function.__name__ was being tracked instead of the LaunchDarkly config key. Co-Authored-By: Claude Sonnet 4.6 --- .../langgraph_agent_graph_runner.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index c0c0b5c0..0a905d94 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -53,6 +53,7 @@ async def run(self, input: Any) -> AgentGraphResult: start_ns = time.perf_counter_ns() try: from langchain_core.messages import AnyMessage, HumanMessage + from langchain_core.tools import StructuredTool from langgraph.graph import END, START, StateGraph from typing_extensions import TypedDict @@ -74,11 +75,18 @@ def handle_traversal(node: AgentGraphNode, ctx: dict) -> None: if node_config.model: lc_model = create_langchain_model(node_config) tool_defs = node_config.model.get_parameter('tools') or [] - tool_fns = [ - tools_ref[t.get('name', '')] - for t in tool_defs - if t.get('name', '') in tools_ref - ] + tool_fns = [] + for t in tool_defs: + config_key = t.get('name', '') + if config_key not in tools_ref: + continue + tool_fns.append( + StructuredTool.from_function( + func=tools_ref[config_key], + name=config_key, + description=t.get('description', ''), + ) + ) model = lc_model.bind_tools(tool_fns) if tool_fns else lc_model def invoke(state: WorkflowState) -> WorkflowState: From daa5d7b1d88bc80ec474e0fd41c690ae06fd809c Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 30 Mar 2026 11:51:39 -0500 Subject: [PATCH 02/20] fix(openai): remove _to_openai_name and pass configured model to Agent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit _to_openai_name converted hyphens to underscores based on an assumed restriction that doesn't exist — the OpenAI API allows hyphens in tool and function names (^[a-zA-Z0-9_-]{1,64}$). Removing it simplifies the tool name lookup and agent naming. Also adds model=model.name to the Agent constructor so each node runs with its configured model rather than the SDK default. 
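For reference, a quick check against the pattern quoted above (the
OPENAI_NAME_RE name below is illustrative, not SDK code):

    import re

    OPENAI_NAME_RE = re.compile(r'^[a-zA-Z0-9_-]{1,64}$')

    # Hyphenated LaunchDarkly config keys already satisfy the pattern,
    # so no underscore rewrite is needed before calling the API.
    assert OPENAI_NAME_RE.match('weather-agent')
    assert OPENAI_NAME_RE.match('weather_agent')
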
Co-Authored-By: Claude Sonnet 4.6 --- .../ldai_openai/openai_agent_graph_runner.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index df2acf65..cdbebd6f 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -10,11 +10,6 @@ from ldai.tracker import TokenUsage -def _to_openai_name(name: str) -> str: - """Convert a hyphenated tool/node name to an underscore-separated OpenAI function name.""" - return name.replace('-', '_') - - def _build_native_tool_map() -> dict: try: from agents import ( @@ -281,21 +276,19 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: # --- tools --- agent_tools: List[Tool] = [] for tool_def in tool_defs: - tool_name_raw = tool_def.get('name', '') - tool_name = _to_openai_name(tool_name_raw) + tool_name = tool_def.get('name', '') # Check native OpenAI tools first, then fall back to ToolRegistry if tool_name in _NATIVE_OPENAI_TOOLS: agent_tools.append(_NATIVE_OPENAI_TOOLS[tool_name](tool_def)) continue - tool_fn = self._tools.get(tool_name) or self._tools.get(tool_name_raw) + tool_fn = self._tools.get(tool_name) if not tool_fn: continue def _make_tool( name: str, - raw_name: str, fn: Any, description: str, params_schema: dict, @@ -306,7 +299,7 @@ def wrapped(tool_ctx: ToolContext, tool_args: str) -> Any: args = json.loads(tool_args) except Exception: args = {} - path.append(raw_name) + path.append(name) if config_tracker is not None: config_tracker.track_tool_call( name, @@ -324,7 +317,6 @@ def wrapped(tool_ctx: ToolContext, tool_args: str) -> Any: agent_tools.append( _make_tool( tool_name, - tool_name_raw, tool_fn, tool_def.get('description', ''), tool_def.get('parameters', {}), @@ -332,7 +324,8 @@ def wrapped(tool_ctx: ToolContext, tool_args: str) -> Any: ) return Agent( - name=_to_openai_name(node_config.key), + name=node_config.key, + model=model.name, instructions=f'{RECOMMENDED_PROMPT_PREFIX} {node_config.instructions or ""}', handoffs=list(agent_handoffs), tools=list(agent_tools), From 26f16654c05ce5dba098bbb43906cd985e5d58e8 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 30 Mar 2026 14:06:50 -0500 Subject: [PATCH 03/20] fix(openai): track native tool calls by parsing RunResult.new_items Previously, tool tracking relied on wrapping custom FunctionTools, which meant native hosted tools (WebSearchTool, FileSearchTool, etc.) were never tracked since they run server-side with no local callback. Instead, parse result.new_items after the run completes. Each ToolCallItem carries the originating agent (node attribution) and raw tool call data, covering both custom and native tools without requiring local execution. Also adds openai-agents as an optional dependency and dev dependency. 
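Illustrative sketch of the parsing approach (ToolCallItem, new_items,
agent, and raw_item are the openai-agents attributes this change relies
on; the helper name below is hypothetical):

    from typing import Any, List, Tuple

    from agents.items import ToolCallItem

    def extract_tool_calls(new_items: List[Any]) -> List[Tuple[str, Any]]:
        # Each ToolCallItem records the Agent that issued the call
        # (node attribution) plus the raw provider payload, whether the
        # tool ran locally (FunctionTool) or server-side (hosted tools).
        return [
            (item.agent.name, getattr(item.raw_item, 'type', None))
            for item in new_items
            if isinstance(item, ToolCallItem)
        ]
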
Co-Authored-By: Claude Sonnet 4.6 --- .../server-ai-openai/pyproject.toml | 4 ++ .../ldai_openai/openai_agent_graph_runner.py | 24 ++++++---- .../src/ldai_openai/openai_helper.py | 46 ++++++++++++++++++- 3 files changed, 64 insertions(+), 10 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/pyproject.toml b/packages/ai-providers/server-ai-openai/pyproject.toml index eec86422..0f7f78c7 100644 --- a/packages/ai-providers/server-ai-openai/pyproject.toml +++ b/packages/ai-providers/server-ai-openai/pyproject.toml @@ -23,6 +23,9 @@ dependencies = [ "openai>=1.0.0", ] +[project.optional-dependencies] +agents = ["openai-agents>=0.0.1"] + [project.urls] Homepage = "https://docs.launchdarkly.com/sdk/ai/python" Repository = "https://github.com/launchdarkly/python-server-sdk-ai" @@ -35,6 +38,7 @@ dev = [ "mypy==1.18.2", "pycodestyle>=2.11.0", "isort>=5.12.0", + "openai-agents>=0.0.1", ] [build-system] diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index cdbebd6f..68cabcec 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -9,6 +9,8 @@ from ldai.providers.types import LDAIMetrics from ldai.tracker import TokenUsage +from ldai_openai.openai_helper import get_tool_calls_from_run_items + def _build_native_tool_map() -> dict: try: @@ -85,6 +87,7 @@ async def run(self, input: Any) -> AgentGraphResult: root_agent = self._build_agents(path, state) result = await Runner.run(root_agent, str(input)) self._flush_final_segment(state, tracker, result) + self._track_tool_calls(result, tracker) duration = (time.perf_counter_ns() - start_ns) // 1_000_000 @@ -163,6 +166,17 @@ def _flush_final_segment( config_tracker.track_duration(int(duration_ms), graph_key=gk) config_tracker.track_success(graph_key=gk) + def _track_tool_calls(self, result: Any, tracker: Any) -> None: + """Track all tool calls from the run result, attributed to the node that called them.""" + gk = tracker.graph_key if tracker is not None else None + for agent_name, tool_name in get_tool_calls_from_run_items(result.new_items): + node = self._graph.get_node(agent_name) + if node is None: + continue + config_tracker = node.get_config().tracker + if config_tracker is not None: + config_tracker.track_tool_call(tool_name, graph_key=gk) + def _handle_handoff( self, run_ctx: Any, @@ -231,12 +245,10 @@ def _build_agents(self, path: List[str], state: _RunState) -> Any: Agent, FunctionTool, Handoff, - RunContextWrapper, Tool, handoff, ) from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX - from agents.tool_context import ToolContext except ImportError as exc: raise ImportError( "openai-agents is required for OpenAIAgentGraphRunner. 
" @@ -293,18 +305,12 @@ def _make_tool( description: str, params_schema: dict, ) -> FunctionTool: - def wrapped(tool_ctx: ToolContext, tool_args: str) -> Any: + def wrapped(tool_ctx: Any, tool_args: str) -> Any: import json try: args = json.loads(tool_args) except Exception: args = {} - path.append(name) - if config_tracker is not None: - config_tracker.track_tool_call( - name, - graph_key=tracker.graph_key if tracker is not None else None, - ) return fn(**args) return FunctionTool( diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index 3cc41e48..89d1ca40 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -1,4 +1,4 @@ -from typing import Any, Iterable, List, Optional, cast +from typing import Any, Iterable, List, Optional, Tuple, cast from ldai import LDMessage from ldai.providers.types import LDAIMetrics @@ -44,3 +44,47 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: :return: LDAIMetrics with success status and token usage """ return LDAIMetrics(success=True, usage=get_ai_usage_from_response(response)) + + +# Native tool raw_item type names don't always match the LD config key convention. +_NATIVE_TOOL_TYPE_TO_CONFIG_KEY = { + 'web_search': 'web_search_tool', + 'file_search': 'file_search_tool', +} + + +def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]]: + """ + Extract (agent_name, tool_name) pairs from RunResult.new_items. + + Covers both custom FunctionTools (tracked by their config key) and native + hosted tools (web search, file search, code interpreter, image generation). + + :param new_items: The new_items list from a RunResult + :return: List of (agent_name, tool_name) tuples + """ + try: + from agents.items import ToolCallItem + from openai.types.responses import ResponseFunctionToolCall + except ImportError: + return [] + + result = [] + for item in new_items: + if not isinstance(item, ToolCallItem): + continue + agent_name = getattr(item.agent, 'name', None) + if not agent_name: + continue + raw = item.raw_item + if isinstance(raw, ResponseFunctionToolCall): + # Custom FunctionTools are registered as 'tool_{config_key}' + tool_name = raw.name.removeprefix('tool_') + else: + raw_type = getattr(raw, 'type', None) or (raw.get('type') if isinstance(raw, dict) else None) + if not raw_type: + continue + tool_name = _NATIVE_TOOL_TYPE_TO_CONFIG_KEY.get(raw_type, raw_type) + if tool_name: + result.append((agent_name, tool_name)) + return result From 7800e522c865dfbf0273d25fecbda613c3705731 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Mon, 30 Mar 2026 14:07:30 -0500 Subject: [PATCH 04/20] chore(langchain): add langgraph as optional and dev dependency langgraph is required to use LangGraphAgentGraphRunner but was not listed as a dependency. Adding it as an optional extra (graph) and dev dependency, consistent with how openai-agents is handled in the openai provider. 
Co-Authored-By: Claude Sonnet 4.6 --- packages/ai-providers/server-ai-langchain/pyproject.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index a8fe2efd..f2469ac9 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -24,6 +24,9 @@ dependencies = [ "langchain>=1.0.0", ] +[project.optional-dependencies] +graph = ["langgraph>=0.1.0"] + [project.urls] Homepage = "https://docs.launchdarkly.com/sdk/ai/python" Repository = "https://github.com/launchdarkly/python-server-sdk-ai" @@ -36,6 +39,7 @@ dev = [ "mypy==1.18.2", "pycodestyle>=2.11.0", "isort>=5.12.0", + "langgraph>=0.1.0", ] [build-system] From 957747db02d33f54bb4898d274b7a82e19f581dd Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 31 Mar 2026 11:40:56 -0500 Subject: [PATCH 05/20] fix: resolve lint and test failures - Remove FileSearchTool, CodeInterpreterTool, ImageGenerationTool from the native tool map; they require mandatory constructor args not available from the LD config and were already broken before. - Move StructuredTool import inside the tools loop so it is only attempted when tools are present, fixing a test that mocks langchain_core without langchain_core.tools. Co-Authored-By: Claude Sonnet 4.6 --- .../src/ldai_langchain/langgraph_agent_graph_runner.py | 2 +- .../src/ldai_openai/openai_agent_graph_runner.py | 10 +--------- .../server-ai-openai/src/ldai_openai/openai_helper.py | 1 - 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index 0a905d94..6f75295d 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -53,7 +53,6 @@ async def run(self, input: Any) -> AgentGraphResult: start_ns = time.perf_counter_ns() try: from langchain_core.messages import AnyMessage, HumanMessage - from langchain_core.tools import StructuredTool from langgraph.graph import END, START, StateGraph from typing_extensions import TypedDict @@ -80,6 +79,7 @@ def handle_traversal(node: AgentGraphNode, ctx: dict) -> None: config_key = t.get('name', '') if config_key not in tools_ref: continue + from langchain_core.tools import StructuredTool tool_fns.append( StructuredTool.from_function( func=tools_ref[config_key], diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 68cabcec..4bda667a 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -14,17 +14,9 @@ def _build_native_tool_map() -> dict: try: - from agents import ( - CodeInterpreterTool, - FileSearchTool, - ImageGenerationTool, - WebSearchTool, - ) + from agents import WebSearchTool return { 'web_search_tool': lambda _: WebSearchTool(), - 'file_search_tool': lambda _: FileSearchTool(), - 'code_interpreter': lambda _: CodeInterpreterTool(), - 'image_generation': lambda _: ImageGenerationTool(), } except ImportError: return {} diff --git 
a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index 89d1ca40..f63076b6 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -49,7 +49,6 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: # Native tool raw_item type names don't always match the LD config key convention. _NATIVE_TOOL_TYPE_TO_CONFIG_KEY = { 'web_search': 'web_search_tool', - 'file_search': 'file_search_tool', } From fb647ae481f5148e17f3aa1e0c7a86562bcfeafa Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 31 Mar 2026 13:01:21 -0500 Subject: [PATCH 06/20] additional cleanup --- .../ldai_langchain/langchain_agent_runner.py | 8 +- .../src/ldai_langchain/langchain_helper.py | 14 ++ .../langgraph_agent_graph_runner.py | 23 +- .../ldai_openai/openai_agent_graph_runner.py | 213 +++++++++--------- .../src/ldai_openai/openai_helper.py | 20 +- .../src/ldai_openai/openai_runner_factory.py | 22 +- packages/sdk/server-ai/src/ldai/client.py | 11 +- .../src/ldai/providers/runner_factory.py | 10 +- 8 files changed, 172 insertions(+), 149 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py index b0a1c850..bb637714 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py @@ -6,7 +6,7 @@ from ldai.providers import AgentResult, AgentRunner from ldai.providers.types import LDAIMetrics -from ldai_langchain.langchain_helper import sum_token_usage_from_messages +from ldai_langchain.langchain_helper import extract_last_message_content, sum_token_usage_from_messages class LangChainAgentRunner(AgentRunner): @@ -37,11 +37,7 @@ async def run(self, input: Any) -> AgentResult: "messages": [{"role": "user", "content": str(input)}] }) messages = result.get("messages", []) - output = "" - if messages: - last = messages[-1] - if hasattr(last, 'content') and isinstance(last.content, str): - output = last.content + output = extract_last_message_content(messages) return AgentResult( output=output, raw=result, diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py index 677d2435..a42cf129 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py @@ -234,6 +234,20 @@ def get_tool_calls_from_response(response: Any) -> List[str]: return names +def extract_last_message_content(messages: List[Any]) -> str: + """ + Extract the string content of the last message in a list. + + :param messages: List of LangChain message objects + :return: String content of the last message, or empty string if none + """ + if messages: + last = messages[-1] + if hasattr(last, 'content'): + return str(last.content) + return '' + + def sum_token_usage_from_messages(messages: List[Any]) -> Optional[TokenUsage]: """ Sum token usage across LangChain messages using get_ai_usage_from_response per message. 
diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index 6f75295d..746fdd16 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -10,7 +10,9 @@ from ldai.providers.types import LDAIMetrics from ldai_langchain.langchain_helper import ( + build_structured_tools, create_langchain_model, + extract_last_message_content, get_ai_metrics_from_response, get_ai_usage_from_response, get_tool_calls_from_response, @@ -73,20 +75,7 @@ def handle_traversal(node: AgentGraphNode, ctx: dict) -> None: model = None if node_config.model: lc_model = create_langchain_model(node_config) - tool_defs = node_config.model.get_parameter('tools') or [] - tool_fns = [] - for t in tool_defs: - config_key = t.get('name', '') - if config_key not in tools_ref: - continue - from langchain_core.tools import StructuredTool - tool_fns.append( - StructuredTool.from_function( - func=tools_ref[config_key], - name=config_key, - description=t.get('description', ''), - ) - ) + tool_fns = build_structured_tools(node_config, tools_ref) model = lc_model.bind_tools(tool_fns) if tool_fns else lc_model def invoke(state: WorkflowState) -> WorkflowState: @@ -132,12 +121,8 @@ def invoke(state: WorkflowState) -> WorkflowState: ) duration = (time.perf_counter_ns() - start_ns) // 1_000_000 - output = '' messages = result.get('messages', []) - if messages: - last = messages[-1] - if hasattr(last, 'content'): - output = str(last.content) + output = extract_last_message_content(messages) if tracker: tracker.track_path(exec_path) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 19967917..fbddd2db 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -1,5 +1,6 @@ """OpenAI agent graph runner for LaunchDarkly AI SDK.""" +import json import time from typing import Any, List, Optional @@ -11,6 +12,7 @@ from ldai_openai.openai_helper import ( NATIVE_OPENAI_TOOLS, + extract_usage_from_request_entry, get_ai_usage_from_response, get_tool_calls_from_run_items, ) @@ -40,7 +42,7 @@ def __init__(self, graph: AgentGraphDefinition, tools: ToolRegistry): Initialize the runner. 
:param graph: The AgentGraphDefinition to execute - :param tools: Registry mapping OpenAI-formatted tool names to callables + :param tools: Registry mapping tool names to callables """ self._graph = graph self._tools = tools @@ -105,105 +107,6 @@ async def run(self, input: Any) -> AgentGraphResult: metrics=LDAIMetrics(success=False), ) - def _flush_final_segment( - self, - state: _RunState, - tracker: Any, - result: Any, - ) -> None: - """Record duration/tokens for the last active agent (no handoff after it).""" - if not state.last_node_key: - return - node = self._graph.get_node(state.last_node_key) - if node is None: - return - config_tracker = node.get_config().tracker - if config_tracker is None: - return - - now_ns = time.perf_counter_ns() - duration_ms = (now_ns - state.last_handoff_ns) // 1_000_000 - - usage: Optional[TokenUsage] = None - try: - usage_entry = result.context_wrapper.usage.request_usage_entries[-1] - usage = TokenUsage( - total=usage_entry.total_tokens, - input=usage_entry.input_tokens, - output=usage_entry.output_tokens, - ) - except Exception: - pass - - gk = tracker.graph_key if tracker is not None else None - if usage is not None: - config_tracker.track_tokens(usage, graph_key=gk) - config_tracker.track_duration(int(duration_ms), graph_key=gk) - config_tracker.track_success(graph_key=gk) - - def _track_tool_calls(self, result: Any, tracker: Any) -> None: - """Track all tool calls from the run result, attributed to the node that called them.""" - gk = tracker.graph_key if tracker is not None else None - for agent_name, tool_name in get_tool_calls_from_run_items(result.new_items): - node = self._graph.get_node(agent_name) - if node is None: - continue - config_tracker = node.get_config().tracker - if config_tracker is not None: - config_tracker.track_tool_call(tool_name, graph_key=gk) - - def _handle_handoff( - self, - run_ctx: Any, - src: str, - tgt: str, - path: List[str], - tracker: Any, - config_tracker: Any, - state: _RunState, - ) -> None: - path.append(tgt) - state.last_node_key = tgt - if tracker: - tracker.track_handoff_success(src, tgt) - - usage: Optional[TokenUsage] = None - now_ns = time.perf_counter_ns() - duration_ms = (now_ns - state.last_handoff_ns) // 1_000_000 - state.last_handoff_ns = now_ns - try: - usage_entry = run_ctx.usage.request_usage_entries[-1] - usage = TokenUsage( - total=usage_entry.total_tokens, - input=usage_entry.input_tokens, - output=usage_entry.output_tokens, - ) - except Exception: - pass - - gk = tracker.graph_key if tracker is not None else None - if config_tracker is not None: - if usage is not None: - config_tracker.track_tokens(usage, graph_key=gk) - if duration_ms is not None: - config_tracker.track_duration(int(duration_ms), graph_key=gk) - config_tracker.track_success(graph_key=gk) - - def _make_on_handoff( - self, - src: str, - tgt: str, - path: List[str], - tracker: Any, - config_tracker: Any, - state: _RunState, - ): - def on_handoff(run_ctx: Any) -> None: - self._handle_handoff( - run_ctx, src, tgt, path, tracker, config_tracker, state - ) - return on_handoff - def _build_agents(self, path: List[str], state: _RunState) -> Any: """ Build the agent tree from the graph definition via reverse_traverse. 
@@ -280,16 +183,22 @@ def _make_tool( description: str, params_schema: dict, ) -> FunctionTool: - def wrapped(tool_ctx: Any, tool_args: str) -> Any: - import json + async def wrapped(tool_ctx: Any, tool_args: str) -> str: try: - args = json.loads(tool_args) + args = json.loads(tool_args) if tool_args else {} except Exception: args = {} - return fn(**args) + try: + res = fn(**args) + if hasattr(res, "__await__"): + res = await res + return str(res) + except Exception as e: + log.warning(f"Tool '{name}' execution failed: {e}") + return f"Tool execution failed: {e}" return FunctionTool( - name=f'tool_{name}', + name=name, description=description, params_json_schema=params_schema, on_invoke_tool=wrapped, @@ -313,3 +222,97 @@ def wrapped(tool_ctx: Any, tool_args: str) -> Any: ) return self._graph.reverse_traverse(fn=build_node) + + def _make_on_handoff( + self, + src: str, + tgt: str, + path: List[str], + tracker: Any, + config_tracker: Any, + state: _RunState, + ): + def on_handoff(run_ctx: Any) -> None: + self._handle_handoff( + run_ctx, src, tgt, path, tracker, config_tracker, state + ) + return on_handoff + + def _handle_handoff( + self, + run_ctx: Any, + src: str, + tgt: str, + path: List[str], + tracker: Any, + config_tracker: Any, + state: _RunState, + ) -> None: + path.append(tgt) + state.last_node_key = tgt + if tracker: + tracker.track_handoff_success(src, tgt) + + now_ns = time.perf_counter_ns() + duration_ms = (now_ns - state.last_handoff_ns) // 1_000_000 + state.last_handoff_ns = now_ns + + usage: Optional[TokenUsage] = None + try: + usage = extract_usage_from_request_entry( + run_ctx.usage.request_usage_entries[-1] + ) + except Exception: + pass + + gk = tracker.graph_key if tracker is not None else None + if config_tracker is not None: + if usage is not None: + config_tracker.track_tokens(usage, graph_key=gk) + if duration_ms is not None: + config_tracker.track_duration(int(duration_ms), graph_key=gk) + config_tracker.track_success(graph_key=gk) + + def _flush_final_segment( + self, + state: _RunState, + tracker: Any, + result: Any, + ) -> None: + """Record duration/tokens for the last active agent (no handoff after it).""" + if not state.last_node_key: + return + node = self._graph.get_node(state.last_node_key) + if node is None: + return + config_tracker = node.get_config().tracker + if config_tracker is None: + return + + now_ns = time.perf_counter_ns() + duration_ms = (now_ns - state.last_handoff_ns) // 1_000_000 + + usage: Optional[TokenUsage] = None + try: + usage = extract_usage_from_request_entry( + result.context_wrapper.usage.request_usage_entries[-1] + ) + except Exception: + pass + + gk = tracker.graph_key if tracker is not None else None + if usage is not None: + config_tracker.track_tokens(usage, graph_key=gk) + config_tracker.track_duration(int(duration_ms), graph_key=gk) + config_tracker.track_success(graph_key=gk) + + def _track_tool_calls(self, result: Any, tracker: Any) -> None: + """Track all tool calls from the run result, attributed to the node that called them.""" + gk = tracker.graph_key if tracker is not None else None + for agent_name, tool_name in get_tool_calls_from_run_items(result.new_items): + node = self._graph.get_node(agent_name) + if node is None: + continue + config_tracker = node.get_config().tracker + if config_tracker is not None: + config_tracker.track_tool_call(tool_name, graph_key=gk) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py 
b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index 8923b360..6e4ade17 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -64,6 +64,23 @@ def get_ai_usage_from_response(response: Any) -> Optional[TokenUsage]: return None +def extract_usage_from_request_entry(entry: Any) -> Optional[TokenUsage]: + """ + Extract token usage from a single request_usage_entry in an openai-agents RunResult. + + :param entry: A request_usage_entry from context_wrapper.usage.request_usage_entries + :return: TokenUsage or None if extraction fails + """ + try: + return TokenUsage( + total=entry.total_tokens, + input=entry.input_tokens, + output=entry.output_tokens, + ) + except Exception: + return None + + def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: """ Extract LaunchDarkly AI metrics from an OpenAI response. @@ -105,8 +122,7 @@ def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]] continue raw = item.raw_item if isinstance(raw, ResponseFunctionToolCall): - # Custom FunctionTools are registered as 'tool_{config_key}' - tool_name = raw.name.removeprefix('tool_') + tool_name = raw.name else: raw_type = getattr(raw, 'type', None) or (raw.get('type') if isinstance(raw, dict) else None) if not raw_type: diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index 644d12f0..28166557 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -24,6 +24,17 @@ def __init__(self, client: Optional[AsyncOpenAI] = None): api_key=os.environ.get('OPENAI_API_KEY'), ) + def _extract_model_config(self, config: AIConfigKind) -> tuple: + """ + Extract model name and parameters from an AI config. + + :param config: The LaunchDarkly AI configuration + :return: Tuple of (model_name, parameters) + """ + config_dict = config.to_dict() + model_dict = config_dict.get('model') or {} + return model_dict.get('name', ''), model_dict.get('parameters') or {} + def create_model(self, config: AIConfigKind) -> OpenAIModelRunner: """ Create a configured OpenAIModelRunner for the given AI config. 
@@ -33,10 +44,7 @@ def create_model(self, config: AIConfigKind) -> OpenAIModelRunner: :param config: The LaunchDarkly AI configuration :return: OpenAIModelRunner ready to invoke the model """ - config_dict = config.to_dict() - model_dict = config_dict.get('model') or {} - model_name = model_dict.get('name', '') - parameters = model_dict.get('parameters') or {} + model_name, parameters = self._extract_model_config(config) return OpenAIModelRunner(self._client, model_name, parameters) def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: @@ -60,10 +68,8 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> 'Op """ from ldai_openai.openai_agent_runner import OpenAIAgentRunner - config_dict = config.to_dict() - model_dict = config_dict.get('model') or {} - model_name = model_dict.get('name', '') - parameters = dict(model_dict.get('parameters') or {}) + model_name, base_parameters = self._extract_model_config(config) + parameters = dict(base_parameters) tool_definitions = parameters.pop('tools', []) or [] instructions = (config.instructions or '') if hasattr(config, 'instructions') else '' diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index d77974e9..cdc14c42 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -32,14 +32,15 @@ from ldai.tracker import AIGraphTracker, LDAIConfigTracker _TRACK_SDK_INFO = '$ld:ai:sdk:info' -_TRACK_USAGE_COMPLETION_CONFIG = '$ld:ai:usage:completion-config' -_TRACK_USAGE_CREATE_MODEL = '$ld:ai:usage:create-model' -_TRACK_USAGE_CREATE_AGENT = '$ld:ai:usage:create-agent' -_TRACK_USAGE_JUDGE_CONFIG = '$ld:ai:usage:judge-config' -_TRACK_USAGE_CREATE_JUDGE = '$ld:ai:usage:create-judge' + _TRACK_USAGE_AGENT_CONFIG = '$ld:ai:usage:agent-config' _TRACK_USAGE_AGENT_CONFIGS = '$ld:ai:usage:agent-configs' +_TRACK_USAGE_COMPLETION_CONFIG = '$ld:ai:usage:completion-config' +_TRACK_USAGE_CREATE_AGENT = '$ld:ai:usage:create-agent' _TRACK_USAGE_CREATE_AGENT_GRAPH = '$ld:ai:usage:create-agent-graph' +_TRACK_USAGE_CREATE_JUDGE = '$ld:ai:usage:create-judge' +_TRACK_USAGE_CREATE_MODEL = '$ld:ai:usage:create-model' +_TRACK_USAGE_JUDGE_CONFIG = '$ld:ai:usage:judge-config' _INIT_TRACK_CONTEXT = Context.builder('ld-internal-tracking').kind('ld_ai').anonymous(True).build() diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index 190704ee..589896bd 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -4,6 +4,8 @@ from ldai import log from ldai.models import AIConfigKind from ldai.providers.ai_provider import AIProvider +from ldai.providers.agent_graph_runner import AgentGraphRunner +from ldai.providers.agent_runner import AgentRunner from ldai.providers.model_runner import ModelRunner T = TypeVar('T') @@ -133,14 +135,14 @@ def create_agent( config: Any, tools: Any, default_ai_provider: Optional[str] = None, - ) -> Optional[Any]: + ) -> Optional[AgentRunner]: """ Create an agent executor for the given AI agent config and tool registry. 
:param config: LaunchDarkly AI agent config :param tools: Tool registry mapping tool names to callables :param default_ai_provider: Optional provider override - :return: AgentExecutor instance, or None + :return: AgentRunner instance, or None """ provider_name = config.provider.name.lower() if config.provider else None providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name) @@ -151,14 +153,14 @@ def create_agent_graph( graph_def: Any, tools: Any, default_ai_provider: Optional[str] = None, - ) -> Optional[Any]: + ) -> Optional[AgentGraphRunner]: """ Create an agent graph executor for the given graph definition and tool registry. :param graph_def: AgentGraphDefinition instance :param tools: Tool registry mapping tool names to callables :param default_ai_provider: Optional provider override - :return: AgentGraphExecutor instance, or None + :return: AgentGraphRunner instance, or None """ provider_name = None if graph_def.root() and graph_def.root().get_config() and graph_def.root().get_config().provider: From c5a494ba20b241d27dfebc90e4ba64bece1f1afb Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 31 Mar 2026 16:42:54 -0500 Subject: [PATCH 07/20] simplify langchain tools --- .../src/ldai_langchain/langchain_helper.py | 117 ++++-------------- .../langchain_runner_factory.py | 4 +- .../langgraph_agent_graph_runner.py | 4 +- .../tests/test_langchain_provider.py | 4 +- .../src/ldai_openai/openai_agent_runner.py | 74 +++++------ 5 files changed, 64 insertions(+), 139 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py index a42cf129..b1b18c8f 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py @@ -1,7 +1,7 @@ -from typing import Any, Dict, List, Optional, Union +from typing import Any, List, Optional from langchain_core.language_models.chat_models import BaseChatModel -from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage from ldai import LDMessage, log from ldai.models import AIConfigKind from ldai.providers import ToolRegistry @@ -51,18 +51,12 @@ def convert_messages_to_langchain( return result -def create_langchain_model(ai_config: AIConfigKind, tool_registry: Optional[ToolRegistry] = None) -> BaseChatModel: +def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: """ Create a LangChain BaseChatModel from a LaunchDarkly AI configuration. - If the config includes tool definitions and a tool_registry is provided, tools found - in the registry are bound to the model. Tools not found in the registry are skipped - with a warning. Built-in provider tools (e.g. code_interpreter) are not supported - via LangChain's bind_tools abstraction and are skipped with a warning. 
- :param ai_config: The LaunchDarkly AI configuration - :param tool_registry: Optional registry mapping tool names to callable implementations - :return: A configured LangChain BaseChatModel, with tools bound if applicable + :return: A configured LangChain BaseChatModel """ from langchain.chat_models import init_chat_model @@ -73,7 +67,7 @@ def create_langchain_model(ai_config: AIConfigKind, tool_registry: Optional[Tool model_name = model_dict.get('name', '') provider = provider_dict.get('name', '') parameters = dict(model_dict.get('parameters') or {}) - tool_definitions = parameters.pop('tools', []) or [] + parameters.pop('tools', None) mapped_provider = map_provider(provider) # Bedrock requires the foundation provider (e.g. Bedrock:Anthropic) passed in @@ -81,104 +75,43 @@ def create_langchain_model(ai_config: AIConfigKind, tool_registry: Optional[Tool if mapped_provider == 'bedrock_converse' and 'provider' not in parameters: parameters['provider'] = provider.removeprefix('bedrock:') - model = init_chat_model( + return init_chat_model( model_name, model_provider=mapped_provider, **parameters, ) - if tool_definitions and tool_registry is not None: - bindable = _resolve_tools_for_langchain(tool_definitions, tool_registry) - if bindable: - model = model.bind_tools(bindable) - - return model - -def _iter_valid_tools( - tool_definitions: List[Dict[str, Any]], - tool_registry: ToolRegistry, -) -> List[tuple]: - """ - Filter LD tool definitions against a registry, returning (name, td) pairs for each - valid function tool that has a callable implementation. Built-in provider tools and - tools missing from the registry are skipped with a warning. +def build_tools(ai_config: AIConfigKind, tool_registry: ToolRegistry) -> List[Any]: """ - valid = [] - for td in tool_definitions: - if not isinstance(td, dict): - continue - - tool_type = td.get('type') - if tool_type and tool_type != 'function': - log.warning( - f"Built-in tool '{tool_type}' is not reliably supported via LangChain and will be skipped. " - "Use a provider-specific runner to use built-in provider tools." - ) - continue - - name = td.get('name') - if not name: - continue - - if name not in tool_registry: - log.warning(f"Tool '{name}' is defined in the AI config but was not found in the tool registry; skipping.") - continue + Return callables from the registry for each tool defined in the AI config. - valid.append((name, td)) - - return valid - - -def _resolve_tools_for_langchain( - tool_definitions: List[Dict[str, Any]], - tool_registry: ToolRegistry, -) -> List[Dict[str, Any]]: - """ - Match LD tool definitions against a registry, returning function-calling tool dicts - for tools that have a callable implementation. Built-in provider tools and tools - missing from the registry are skipped with a warning. - """ - return [ - { - 'type': 'function', - 'function': { - 'name': name, - 'description': td.get('description', ''), - 'parameters': td.get('parameters', {'type': 'object', 'properties': {}}), - }, - } - for name, td in _iter_valid_tools(tool_definitions, tool_registry) - ] - - -def build_structured_tools(ai_config: AIConfigKind, tool_registry: ToolRegistry) -> List[Any]: - """ - Build a list of LangChain StructuredTool instances from LD tool definitions and a registry. - - Tools found in the registry are wrapped as StructuredTool with the name and description - from the LD config. Built-in provider tools and tools missing from the registry are - skipped with a warning. + Tools not found in the registry are skipped with a warning. 
The returned + callables can be passed directly to bind_tools or langchain.agents.create_agent. + Functions should have type-annotated parameters so LangChain can infer the schema. :param ai_config: The LaunchDarkly AI configuration :param tool_registry: Registry mapping tool names to callable implementations - :return: List of StructuredTool instances ready to pass to langchain.agents.create_agent + :return: List of callables ready to pass to bind_tools or create_agent """ - from langchain_core.tools import StructuredTool - config_dict = ai_config.to_dict() model_dict = config_dict.get('model') or {} parameters = dict(model_dict.get('parameters') or {}) tool_definitions = parameters.pop('tools', []) or [] - return [ - StructuredTool.from_function( - func=tool_registry[name], - name=name, - description=td.get('description', ''), - ) - for name, td in _iter_valid_tools(tool_definitions, tool_registry) - ] + tools = [] + for td in tool_definitions: + if not isinstance(td, dict): + continue + name = td.get('name') + if not name: + continue + fn = tool_registry.get(name) + if fn is None: + log.warning(f"Tool '{name}' is defined in the AI config but was not found in the tool registry; skipping.") + continue + tools.append(fn) + return tools def get_ai_usage_from_response(response: Any) -> Optional[TokenUsage]: diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index 29ab68b7..6bea7ed0 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -5,7 +5,7 @@ from ldai_langchain.langchain_agent_runner import LangChainAgentRunner from ldai_langchain.langchain_helper import ( - build_structured_tools, + build_tools, create_langchain_model, ) from ldai_langchain.langchain_model_runner import LangChainModelRunner @@ -48,7 +48,7 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> Lan from langchain.agents import create_agent as lc_create_agent instructions = (config.instructions or '') if hasattr(config, 'instructions') else '' llm = create_langchain_model(config) - lc_tools = build_structured_tools(config, tools or {}) + lc_tools = build_tools(config, tools or {}) agent = lc_create_agent( llm, diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index 746fdd16..b86ad6e8 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -10,7 +10,7 @@ from ldai.providers.types import LDAIMetrics from ldai_langchain.langchain_helper import ( - build_structured_tools, + build_tools, create_langchain_model, extract_last_message_content, get_ai_metrics_from_response, @@ -75,7 +75,7 @@ def handle_traversal(node: AgentGraphNode, ctx: dict) -> None: model = None if node_config.model: lc_model = create_langchain_model(node_config) - tool_fns = build_structured_tools(node_config, tools_ref) + tool_fns = build_tools(node_config, tools_ref) model = lc_model.bind_tools(tool_fns) if tool_fns else lc_model def invoke(state: WorkflowState) -> WorkflowState: diff --git 
a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py index 52cca655..bad56403 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -422,7 +422,7 @@ def test_creates_agent_runner_with_instructions_and_tool_definitions(self): mock_agent = MagicMock() with patch('ldai_langchain.langchain_runner_factory.create_langchain_model') as mock_create, \ - patch('ldai_langchain.langchain_runner_factory.build_structured_tools') as mock_tools, \ + patch('ldai_langchain.langchain_runner_factory.build_tools') as mock_tools, \ patch('langchain.agents.create_agent', return_value=mock_agent): mock_create.return_value = MagicMock() mock_tools.return_value = [MagicMock()] @@ -447,7 +447,7 @@ def test_creates_agent_runner_with_no_tools(self): mock_agent = MagicMock() with patch('ldai_langchain.langchain_runner_factory.create_langchain_model') as mock_create, \ - patch('ldai_langchain.langchain_runner_factory.build_structured_tools', return_value=[]), \ + patch('ldai_langchain.langchain_runner_factory.build_tools', return_value=[]), \ patch('langchain.agents.create_agent', return_value=mock_agent): mock_create.return_value = MagicMock() diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 199bef4a..7bd08890 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -93,53 +93,45 @@ def _build_agent_tools(self) -> List[Any]: if not isinstance(td, dict): continue name = td.get("name", "") - - # Native OpenAI tools run on OpenAI's infrastructure — no local fn required. - if name and name in NATIVE_OPENAI_TOOLS: - tools.append(NATIVE_OPENAI_TOOLS[name](td)) - continue - - tool_type = td.get("type") - if tool_type and tool_type != "function": - log.warning( - f"Built-in tool '{tool_type}' is not supported and will be skipped. " - "Use the OpenAIAgentGraphRunner for built-in provider tools." - ) - continue - if not name: continue tool_fn = self._tools.get(name) - if not tool_fn: - log.warning( - f"Tool '{name}' is defined in the AI config but was not found in " - "the tool registry; skipping." 
- ) + if tool_fn: + def _make_invoker(fn: Any, tool_name: str) -> Any: + async def on_invoke_tool(tool_ctx: ToolContext, args_json: str) -> str: + try: + args = json.loads(args_json) if args_json else {} + except Exception: + args = {} + try: + res = fn(**args) + if hasattr(res, "__await__"): + res = await res + return str(res) + except Exception as e: + log.warning(f"Tool '{tool_name}' execution failed: {e}") + return f"Tool execution failed: {e}" + return on_invoke_tool + + tools.append(FunctionTool( + name=name, + description=td.get("description", ""), + params_json_schema=td.get("parameters", {}), + on_invoke_tool=_make_invoker(tool_fn, name), + )) continue - def _make_invoker(fn: Any, tool_name: str) -> Any: - async def on_invoke_tool(tool_ctx: ToolContext, args_json: str) -> str: - try: - args = json.loads(args_json) if args_json else {} - except Exception: - args = {} - try: - res = fn(**args) - if hasattr(res, "__await__"): - res = await res - return str(res) - except Exception as e: - log.warning(f"Tool '{tool_name}' execution failed: {e}") - return f"Tool execution failed: {e}" - return on_invoke_tool - - tools.append(FunctionTool( - name=name, - description=td.get("description", ""), - params_json_schema=td.get("parameters", {}), - on_invoke_tool=_make_invoker(tool_fn, name), - )) + # No callable in registry — try native OpenAI tool (exact name match required). + native = NATIVE_OPENAI_TOOLS.get(name) + if native: + tools.append(native(td)) + continue + + log.warning( + f"Tool '{name}' is defined in the AI config but was not found in " + "the tool registry and is not a known native tool; skipping." + ) return tools def _build_model_settings(self) -> Any: From 58d4414204ba9f544f92a97a3a93e818cea81fb2 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 1 Apr 2026 08:33:55 -0500 Subject: [PATCH 08/20] simplify openai tools --- .../ldai_openai/openai_agent_graph_runner.py | 39 +------------------ .../src/ldai_openai/openai_agent_runner.py | 27 +------------ 2 files changed, 4 insertions(+), 62 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index fbddd2db..7c8590f1 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -1,6 +1,5 @@ """OpenAI agent graph runner for LaunchDarkly AI SDK.""" -import json import time from typing import Any, List, Optional @@ -121,9 +120,9 @@ def _build_agents(self, path: List[str], state: _RunState) -> Any: try: from agents import ( Agent, - FunctionTool, Handoff, Tool, + function_tool, handoff, ) from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX @@ -177,41 +176,7 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: if not tool_fn: continue - def _make_tool( - name: str, - fn: Any, - description: str, - params_schema: dict, - ) -> FunctionTool: - async def wrapped(tool_ctx: Any, tool_args: str) -> str: - try: - args = json.loads(tool_args) if tool_args else {} - except Exception: - args = {} - try: - res = fn(**args) - if hasattr(res, "__await__"): - res = await res - return str(res) - except Exception as e: - log.warning(f"Tool '{name}' execution failed: {e}") - return f"Tool execution failed: {e}" - - return FunctionTool( - name=name, - description=description, - params_json_schema=params_schema, - on_invoke_tool=wrapped, - ) - - 
agent_tools.append( - _make_tool( - tool_name, - tool_fn, - tool_def.get('description', ''), - tool_def.get('parameters', {}), - ) - ) + agent_tools.append(function_tool(tool_fn)) return Agent( name=node_config.key, diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 7bd08890..039227bd 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -1,6 +1,5 @@ """OpenAI agent runner for LaunchDarkly AI SDK.""" -import json from typing import Any, Dict, List from ldai import log @@ -85,8 +84,7 @@ async def run(self, input: Any) -> AgentResult: def _build_agent_tools(self) -> List[Any]: """Build tool instances from LD tool definitions and registry.""" - from agents import FunctionTool - from agents.tool_context import ToolContext + from agents import function_tool tools = [] for td in self._tool_definitions: @@ -98,28 +96,7 @@ def _build_agent_tools(self) -> List[Any]: tool_fn = self._tools.get(name) if tool_fn: - def _make_invoker(fn: Any, tool_name: str) -> Any: - async def on_invoke_tool(tool_ctx: ToolContext, args_json: str) -> str: - try: - args = json.loads(args_json) if args_json else {} - except Exception: - args = {} - try: - res = fn(**args) - if hasattr(res, "__await__"): - res = await res - return str(res) - except Exception as e: - log.warning(f"Tool '{tool_name}' execution failed: {e}") - return f"Tool execution failed: {e}" - return on_invoke_tool - - tools.append(FunctionTool( - name=name, - description=td.get("description", ""), - params_json_schema=td.get("parameters", {}), - on_invoke_tool=_make_invoker(tool_fn, name), - )) + tools.append(function_tool(tool_fn)) continue # No callable in registry — try native OpenAI tool (exact name match required). From c8318a2a5b3cd9b8aacf0fcae0de7090df83089a Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 1 Apr 2026 09:57:42 -0500 Subject: [PATCH 09/20] adjusting native tool use and adding graph tests --- .../tests/test_tracking_langgraph.py | 260 +++++++++++++++ .../ldai_openai/openai_agent_graph_runner.py | 6 - .../src/ldai_openai/openai_agent_runner.py | 13 +- .../src/ldai_openai/openai_helper.py | 41 ++- .../src/ldai_openai/openai_runner_factory.py | 7 + .../tests/test_tracking_openai_agents.py | 305 ++++++++++++++++++ 6 files changed, 603 insertions(+), 29 deletions(-) create mode 100644 packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py create mode 100644 packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py diff --git a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py new file mode 100644 index 00000000..7e5fb87e --- /dev/null +++ b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py @@ -0,0 +1,260 @@ +""" +Integration tests for LangGraphAgentGraphRunner tracking pipeline. + +Uses real AIGraphTracker and LDAIConfigTracker backed by a mock LD client, +and a fake LangChain model to verify that the correct LD events are emitted +with the correct payloads — without making real API calls. 
+""" + +import pytest +from collections import defaultdict +from unittest.mock import MagicMock, patch + +from ldai.agent_graph import AgentGraphDefinition +from ldai.models import AIAgentGraphConfig, AIAgentConfig, ModelConfig, ProviderConfig +from ldai.tracker import AIGraphTracker, LDAIConfigTracker +from ldai_langchain.langgraph_agent_graph_runner import LangGraphAgentGraphRunner + +pytestmark = pytest.mark.skipif( + pytest.importorskip('langgraph', reason='langgraph not installed') is None, + reason='langgraph not installed', +) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _make_graph( + mock_ld_client: MagicMock, + node_key: str = 'root-agent', + graph_key: str = 'test-graph', + tool_names: list = None, +) -> AgentGraphDefinition: + """ + Build an AgentGraphDefinition backed by real tracker objects that record + events to a mock LD client. + """ + context = MagicMock() + + node_tracker = LDAIConfigTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + config_key=node_key, + version=1, + model_name='gpt-4', + provider_name='openai', + context=context, + ) + + graph_tracker = AIGraphTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + graph_key=graph_key, + version=1, + context=context, + ) + + tool_defs = ( + [{'name': name, 'type': 'function', 'description': '', 'parameters': {}} + for name in tool_names] + if tool_names else None + ) + + root_config = AIAgentConfig( + key=node_key, + enabled=True, + model=ModelConfig(name='gpt-4', parameters={'tools': tool_defs} if tool_defs else {}), + provider=ProviderConfig(name='openai'), + instructions='You are a helpful assistant.', + tracker=node_tracker, + ) + + graph_config = AIAgentGraphConfig( + key=graph_key, + root_config_key=node_key, + edges=[], + enabled=True, + ) + + nodes = AgentGraphDefinition.build_nodes(graph_config, {node_key: root_config}) + return AgentGraphDefinition( + agent_graph=graph_config, + nodes=nodes, + context=context, + enabled=True, + tracker=graph_tracker, + ) + + +def _make_fake_response( + content: str, + input_tokens: int = 10, + output_tokens: int = 5, + tool_call_names: list = None, +): + """Create a real AIMessage with usage metadata and optional tool calls.""" + from langchain_core.messages import AIMessage + + tool_calls = [ + {'name': name, 'args': {}, 'id': f'call_{i}', 'type': 'tool_call'} + for i, name in enumerate(tool_call_names or []) + ] + + return AIMessage( + content=content, + tool_calls=tool_calls, + usage_metadata={ + 'input_tokens': input_tokens, + 'output_tokens': output_tokens, + 'total_tokens': input_tokens + output_tokens, + }, + ) + + +def _events(mock_ld_client: MagicMock) -> dict: + """Return dict of event_name -> list of (data, value) from all track() calls.""" + result = defaultdict(list) + for call in mock_ld_client.track.call_args_list: + name, _ctx, data, value = call.args + result[name].append((data, value)) + return dict(result) + + +def _mock_model(response): + """Return a mock LangChain model that always returns response on invoke().""" + model = MagicMock() + model.invoke.return_value = response + model.bind_tools.return_value = model + return model + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + +@pytest.mark.asyncio +async def test_tracks_node_and_graph_tokens_on_success(): + 
"""Node-level and graph-level token events fire with the correct counts.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client) + fake_response = _make_fake_response('Sunny.', input_tokens=10, output_tokens=5) + + with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + return_value=_mock_model(fake_response)): + runner = LangGraphAgentGraphRunner(graph, {}) + result = await runner.run("What's the weather?") + + assert result.metrics.success is True + assert result.output == 'Sunny.' + + ev = _events(mock_ld_client) + + # Node-level token events + assert ev['$ld:ai:tokens:total'][0][1] == 15 + assert ev['$ld:ai:tokens:input'][0][1] == 10 + assert ev['$ld:ai:tokens:output'][0][1] == 5 + assert ev['$ld:ai:generation:success'][0][1] == 1 + assert '$ld:ai:duration:total' in ev + + # Graph-level events + assert ev['$ld:ai:graph:total_tokens'][0][1] == 15 + assert ev['$ld:ai:graph:invocation_success'][0][1] == 1 + assert '$ld:ai:graph:latency' in ev + assert '$ld:ai:graph:path' in ev + + +@pytest.mark.asyncio +async def test_tracks_execution_path(): + """The path event contains the executed node key.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, node_key='my-agent') + fake_response = _make_fake_response('Done.') + + with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + return_value=_mock_model(fake_response)): + runner = LangGraphAgentGraphRunner(graph, {}) + await runner.run('hello') + + ev = _events(mock_ld_client) + path_data = ev['$ld:ai:graph:path'][0][0] + assert 'my-agent' in path_data['path'] + + +@pytest.mark.asyncio +async def test_tracks_tool_calls(): + """A tool_call event fires for each tool name found in the model response.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, tool_names=['get_weather']) + fake_response = _make_fake_response('Calling tool.', tool_call_names=['get_weather']) + + tool_registry = {'get_weather': lambda location='NYC': 'sunny'} + + with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + return_value=_mock_model(fake_response)): + runner = LangGraphAgentGraphRunner(graph, tool_registry) + await runner.run('What is the weather?') + + ev = _events(mock_ld_client) + tool_events = ev.get('$ld:ai:tool_call', []) + assert len(tool_events) == 1 + assert tool_events[0][0]['toolKey'] == 'get_weather' + + +@pytest.mark.asyncio +async def test_tracks_multiple_tool_calls(): + """One tool_call event fires per tool name in the response.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, tool_names=['search', 'summarize']) + fake_response = _make_fake_response('Done.', tool_call_names=['search', 'summarize']) + + tool_registry = {'search': lambda q='': q, 'summarize': lambda text='': text} + + with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + return_value=_mock_model(fake_response)): + runner = LangGraphAgentGraphRunner(graph, tool_registry) + await runner.run('Search and summarize.') + + ev = _events(mock_ld_client) + tool_keys = [data['toolKey'] for data, _ in ev.get('$ld:ai:tool_call', [])] + assert sorted(tool_keys) == ['search', 'summarize'] + + +@pytest.mark.asyncio +async def test_tracks_graph_key_on_node_events(): + """Node-level events include the graphKey so they can be correlated to the graph.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, graph_key='my-graph') + fake_response = _make_fake_response('OK.', input_tokens=5, output_tokens=3) + 
+ with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + return_value=_mock_model(fake_response)): + runner = LangGraphAgentGraphRunner(graph, {}) + await runner.run('hello') + + ev = _events(mock_ld_client) + token_data = ev['$ld:ai:tokens:total'][0][0] + assert token_data.get('graphKey') == 'my-graph' + + +@pytest.mark.asyncio +async def test_tracks_failure_and_latency_on_model_error(): + """When the model raises, failure and latency events fire; success does not.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client) + + error_model = MagicMock() + error_model.invoke.side_effect = RuntimeError('model error') + error_model.bind_tools.return_value = error_model + + with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + return_value=error_model): + runner = LangGraphAgentGraphRunner(graph, {}) + result = await runner.run('fail') + + assert result.metrics.success is False + + ev = _events(mock_ld_client) + assert '$ld:ai:graph:invocation_failure' in ev + assert '$ld:ai:graph:latency' in ev + assert '$ld:ai:graph:invocation_success' not in ev diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 7c8590f1..40d829bc 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -10,7 +10,6 @@ from ldai.tracker import TokenUsage from ldai_openai.openai_helper import ( - NATIVE_OPENAI_TOOLS, extract_usage_from_request_entry, get_ai_usage_from_response, get_tool_calls_from_run_items, @@ -167,11 +166,6 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: for tool_def in tool_defs: tool_name = tool_def.get('name', '') - # Check native OpenAI tools first, then fall back to ToolRegistry - if tool_name in NATIVE_OPENAI_TOOLS: - agent_tools.append(NATIVE_OPENAI_TOOLS[tool_name](tool_def)) - continue - tool_fn = self._tools.get(tool_name) if not tool_fn: continue diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 039227bd..bed1cbaa 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -6,10 +6,7 @@ from ldai.providers import AgentResult, AgentRunner, ToolRegistry from ldai.providers.types import LDAIMetrics -from ldai_openai.openai_helper import ( - NATIVE_OPENAI_TOOLS, - get_ai_usage_from_response, -) +from ldai_openai.openai_helper import get_ai_usage_from_response class OpenAIAgentRunner(AgentRunner): @@ -99,15 +96,9 @@ def _build_agent_tools(self) -> List[Any]: tools.append(function_tool(tool_fn)) continue - # No callable in registry — try native OpenAI tool (exact name match required). - native = NATIVE_OPENAI_TOOLS.get(name) - if native: - tools.append(native(td)) - continue - log.warning( f"Tool '{name}' is defined in the AI config but was not found in " - "the tool registry and is not a known native tool; skipping." + "the tool registry; skipping." 
             )
 
         return tools
diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py
index 6e4ade17..3ab89b46 100644
--- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py
+++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py
@@ -6,18 +6,6 @@
 from openai.types.chat import ChatCompletionMessageParam
 
 
-def _build_native_tool_map() -> Dict[str, Any]:
-    try:
-        from agents import WebSearchTool
-        return {
-            'web_search_tool': lambda _: WebSearchTool(),
-        }
-    except ImportError:
-        return {}
-
-
-NATIVE_OPENAI_TOOLS: Dict[str, Any] = _build_native_tool_map()
-
 
 def convert_messages_to_openai(messages: List[LDMessage]) -> Iterable[ChatCompletionMessageParam]:
     """
@@ -91,6 +79,45 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics:
     return LDAIMetrics(success=True, usage=get_ai_usage_from_response(response))
 
 
+# Tool names that require their own API type in the Chat Completions API.
+# LD stores all tools as type="function"; these are converted to their correct type.
+_NATIVE_API_TOOL_NAMES = frozenset({
+    'web_search_tool',
+    'file_search',
+    'computer_use_preview',
+})
+
+
+def normalize_tool_types(tool_definitions: List[Any]) -> List[Dict[str, Any]]:
+    """
+    Convert LD tool definitions to Chat Completions API format.
+
+    LD emits all tools as ``type="function"`` with a flat structure. This helper
+    wraps regular function tools in the nested ``function`` key the API requires,
+    and converts known native tool names to their correct API type without a schema.
+
+    :param tool_definitions: Tool definitions from the LD AI config
+    :return: Tool list ready to pass to ``chat.completions.create``
+    """
+    result = []
+    for td in tool_definitions:
+        if not isinstance(td, dict):
+            continue
+        name = td.get('name', '')
+        if name in _NATIVE_API_TOOL_NAMES:
+            result.append({**td, 'type': name})
+        else:
+            # Nest the flat LD definition under the ``function`` key; the flat
+            # shape triggers a 400 "Missing required parameter: 'tools[0].function'".
+            result.append({
+                'type': 'function',
+                'function': {
+                    'name': name,
+                    'description': td.get('description', ''),
+                    'parameters': td.get('parameters', {}),
+                },
+            })
+    return result
+
+
 # Native tool raw_item type names don't always match the LD config key convention.
 _NATIVE_TOOL_TYPE_TO_CONFIG_KEY = {
     'web_search': 'web_search_tool',
 }
diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py
index 28166557..8d563d33 100644
--- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py
+++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py
@@ -5,6 +5,7 @@
 from ldai.providers import AIProvider, ToolRegistry
 from openai import AsyncOpenAI
 
+from ldai_openai.openai_helper import normalize_tool_types
 from ldai_openai.openai_model_runner import OpenAIModelRunner
 
 if TYPE_CHECKING:
@@ -40,11 +41,17 @@ def create_model(self, config: AIConfigKind) -> OpenAIModelRunner:
         Create a configured OpenAIModelRunner for the given AI config.
 
         Reuses the underlying AsyncOpenAI client so connection pooling is preserved.
+        Tool definitions are converted from LD's flat format to the Chat Completions
+        API format, with native tools mapped to their correct API type.
:param config: The LaunchDarkly AI configuration :return: OpenAIModelRunner ready to invoke the model """ model_name, parameters = self._extract_model_config(config) + parameters = dict(parameters) + tool_defs = parameters.pop('tools', None) or [] + if tool_defs: + parameters['tools'] = normalize_tool_types(tool_defs) return OpenAIModelRunner(self._client, model_name, parameters) def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: diff --git a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py new file mode 100644 index 00000000..e8cbc913 --- /dev/null +++ b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py @@ -0,0 +1,305 @@ +""" +Integration tests for OpenAIAgentGraphRunner tracking pipeline. + +Uses real AIGraphTracker and LDAIConfigTracker backed by a mock LD client, +and a crafted RunResult to verify that the correct LD events are emitted +with the correct payloads — without making real API calls. +""" + +import pytest +from collections import defaultdict +from unittest.mock import AsyncMock, MagicMock, patch + +from ldai.agent_graph import AgentGraphDefinition +from ldai.models import AIAgentGraphConfig, AIAgentConfig, ModelConfig, ProviderConfig +from ldai.tracker import AIGraphTracker, LDAIConfigTracker +from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _make_graph( + mock_ld_client: MagicMock, + node_key: str = 'root-agent', + graph_key: str = 'test-graph', + tool_names: list = None, +) -> AgentGraphDefinition: + """ + Build an AgentGraphDefinition backed by real tracker objects that record + events to a mock LD client. + """ + context = MagicMock() + + node_tracker = LDAIConfigTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + config_key=node_key, + version=1, + model_name='gpt-4', + provider_name='openai', + context=context, + ) + + graph_tracker = AIGraphTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + graph_key=graph_key, + version=1, + context=context, + ) + + tool_defs = ( + [{'name': name, 'type': 'function', 'description': '', 'parameters': {}} + for name in tool_names] + if tool_names else None + ) + + root_config = AIAgentConfig( + key=node_key, + enabled=True, + model=ModelConfig(name='gpt-4', parameters={'tools': tool_defs} if tool_defs else {}), + provider=ProviderConfig(name='openai'), + instructions='You are a helpful assistant.', + tracker=node_tracker, + ) + + graph_config = AIAgentGraphConfig( + key=graph_key, + root_config_key=node_key, + edges=[], + enabled=True, + ) + + nodes = AgentGraphDefinition.build_nodes(graph_config, {node_key: root_config}) + return AgentGraphDefinition( + agent_graph=graph_config, + nodes=nodes, + context=context, + enabled=True, + tracker=graph_tracker, + ) + + +def _make_run_result( + output: str = 'agent answer', + total_tokens: int = 0, + input_tokens: int = 0, + output_tokens: int = 0, + tool_call_items: list = None, +) -> MagicMock: + """ + Build a mock RunResult that resembles the openai-agents SDK RunResult shape + expected by OpenAIAgentGraphRunner. 
+ """ + entry = MagicMock() + entry.total_tokens = total_tokens + entry.input_tokens = input_tokens + entry.output_tokens = output_tokens + + result = MagicMock() + result.final_output = output + result.new_items = tool_call_items or [] + result.usage = None # prevent fallthrough to .usage attribute in get_ai_usage_from_response + result.context_wrapper.usage.total_tokens = total_tokens + result.context_wrapper.usage.input_tokens = input_tokens + result.context_wrapper.usage.output_tokens = output_tokens + result.context_wrapper.usage.request_usage_entries = [entry] + return result + + +def _make_tool_call_item(agent_name: str, tool_name: str) -> MagicMock: + """ + Create a mock ToolCallItem with a ResponseFunctionToolCall raw item so that + get_tool_calls_from_run_items() correctly extracts the tool name. + """ + from agents.items import ToolCallItem + from openai.types.responses import ResponseFunctionToolCall + + raw = MagicMock(spec=ResponseFunctionToolCall) + raw.name = tool_name + + agent = MagicMock() + agent.name = agent_name + + item = MagicMock(spec=ToolCallItem) + item.agent = agent + item.raw_item = raw + return item + + +def _make_agents_modules(run_result: MagicMock) -> dict: + """Build the sys.modules patch dict for the agents package.""" + mock_runner = MagicMock() + mock_runner.run = AsyncMock(return_value=run_result) + + mock_agents = MagicMock() + mock_agents.Runner = mock_runner + mock_agents.Agent = MagicMock(return_value=MagicMock()) + mock_agents.Handoff = MagicMock() + mock_agents.Tool = MagicMock() + mock_agents.function_tool = lambda fn: MagicMock() + mock_agents.handoff = MagicMock(return_value=MagicMock()) + + mock_ext = MagicMock() + mock_ext.RECOMMENDED_PROMPT_PREFIX = '[PREFIX]' + + return { + 'agents': mock_agents, + 'agents.extensions': MagicMock(), + 'agents.extensions.handoff_prompt': mock_ext, + 'agents.tool_context': MagicMock(), + } + + +def _events(mock_ld_client: MagicMock) -> dict: + """Return dict of event_name -> list of (data, value) from all track() calls.""" + result = defaultdict(list) + for call in mock_ld_client.track.call_args_list: + name, _ctx, data, value = call.args + result[name].append((data, value)) + return dict(result) + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + +@pytest.mark.asyncio +async def test_tracks_graph_invocation_success_and_latency(): + """Graph-level success and latency events fire on a successful run.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client) + run_result = _make_run_result(output='done') + + with patch.dict('sys.modules', _make_agents_modules(run_result)): + runner = OpenAIAgentGraphRunner(graph, {}) + result = await runner.run('hello') + + assert result.metrics.success is True + assert result.output == 'done' + + ev = _events(mock_ld_client) + assert ev['$ld:ai:graph:invocation_success'][0][1] == 1 + assert '$ld:ai:graph:latency' in ev + assert '$ld:ai:graph:path' in ev + + +@pytest.mark.asyncio +async def test_tracks_per_node_tokens_and_success(): + """Node-level token and success events fire with correct values.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, node_key='root-agent', graph_key='test-graph') + run_result = _make_run_result( + output='answer', + total_tokens=30, + input_tokens=20, + output_tokens=10, + ) + + with patch.dict('sys.modules', _make_agents_modules(run_result)): + runner = OpenAIAgentGraphRunner(graph, {}) + 
await runner.run('hello') + + ev = _events(mock_ld_client) + + # Node-level events + assert ev['$ld:ai:tokens:total'][0][1] == 30 + assert ev['$ld:ai:tokens:input'][0][1] == 20 + assert ev['$ld:ai:tokens:output'][0][1] == 10 + assert ev['$ld:ai:generation:success'][0][1] == 1 + + # Graph-level total tokens + assert ev['$ld:ai:graph:total_tokens'][0][1] == 30 + + +@pytest.mark.asyncio +async def test_tracks_graph_key_on_node_events(): + """Node-level events include graphKey so they can be correlated to the graph.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, graph_key='my-graph') + run_result = _make_run_result(total_tokens=15, input_tokens=10, output_tokens=5) + + with patch.dict('sys.modules', _make_agents_modules(run_result)): + runner = OpenAIAgentGraphRunner(graph, {}) + await runner.run('hello') + + ev = _events(mock_ld_client) + token_data = ev['$ld:ai:tokens:total'][0][0] + assert token_data.get('graphKey') == 'my-graph' + + +@pytest.mark.asyncio +async def test_tracks_tool_calls_from_run_items(): + """A tool_call event fires for each tool found in RunResult.new_items.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, node_key='root-agent') + + tool_item = _make_tool_call_item('root-agent', 'get_weather') + run_result = _make_run_result(output='done', tool_call_items=[tool_item]) + + with patch.dict('sys.modules', _make_agents_modules(run_result)): + runner = OpenAIAgentGraphRunner(graph, {}) + await runner.run('What is the weather?') + + ev = _events(mock_ld_client) + tool_events = ev.get('$ld:ai:tool_call', []) + assert len(tool_events) == 1 + assert tool_events[0][0]['toolKey'] == 'get_weather' + + +@pytest.mark.asyncio +async def test_tracks_multiple_tool_calls(): + """One tool_call event fires per tool in RunResult.new_items.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, node_key='root-agent') + + items = [ + _make_tool_call_item('root-agent', 'search'), + _make_tool_call_item('root-agent', 'summarize'), + ] + run_result = _make_run_result(output='done', tool_call_items=items) + + with patch.dict('sys.modules', _make_agents_modules(run_result)): + runner = OpenAIAgentGraphRunner(graph, {}) + await runner.run('Search and summarize.') + + ev = _events(mock_ld_client) + tool_keys = [data['toolKey'] for data, _ in ev.get('$ld:ai:tool_call', [])] + assert sorted(tool_keys) == ['search', 'summarize'] + + +@pytest.mark.asyncio +async def test_tracks_failure_and_latency_on_runner_error(): + """When Runner.run raises, failure and latency events fire; success does not.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client) + + mock_runner = MagicMock() + mock_runner.run = AsyncMock(side_effect=RuntimeError('runner error')) + mock_agents = MagicMock() + mock_agents.Runner = mock_runner + mock_agents.Agent = MagicMock(return_value=MagicMock()) + mock_agents.Handoff = MagicMock() + mock_agents.Tool = MagicMock() + mock_agents.function_tool = lambda fn: MagicMock() + mock_agents.handoff = MagicMock(return_value=MagicMock()) + mock_ext = MagicMock() + mock_ext.RECOMMENDED_PROMPT_PREFIX = '[PREFIX]' + + with patch.dict('sys.modules', { + 'agents': mock_agents, + 'agents.extensions': MagicMock(), + 'agents.extensions.handoff_prompt': mock_ext, + 'agents.tool_context': MagicMock(), + }): + runner = OpenAIAgentGraphRunner(graph, {}) + result = await runner.run('fail') + + assert result.metrics.success is False + + ev = _events(mock_ld_client) + assert '$ld:ai:graph:invocation_failure' in ev + assert 
'$ld:ai:graph:latency' in ev + assert '$ld:ai:graph:invocation_success' not in ev From 69ba99f82bf87a5935f271e32142c5b30e58355e Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 1 Apr 2026 11:02:03 -0500 Subject: [PATCH 10/20] add multi node tests and sanitize agent names for openai --- .../tests/test_tracking_langgraph.py | 108 +++++++++++- .../ldai_openai/openai_agent_graph_runner.py | 21 ++- .../tests/test_tracking_openai_agents.py | 156 +++++++++++++++++- 3 files changed, 279 insertions(+), 6 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py index 7e5fb87e..7fc9ab62 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py @@ -11,7 +11,7 @@ from unittest.mock import MagicMock, patch from ldai.agent_graph import AgentGraphDefinition -from ldai.models import AIAgentGraphConfig, AIAgentConfig, ModelConfig, ProviderConfig +from ldai.models import AIAgentGraphConfig, AIAgentConfig, Edge, ModelConfig, ProviderConfig from ldai.tracker import AIGraphTracker, LDAIConfigTracker from ldai_langchain.langgraph_agent_graph_runner import LangGraphAgentGraphRunner @@ -129,6 +129,74 @@ def _mock_model(response): return model +def _make_two_node_graph(mock_ld_client: MagicMock) -> 'AgentGraphDefinition': + """Build a two-node AgentGraphDefinition (root-agent → child-agent).""" + context = MagicMock() + + root_tracker = LDAIConfigTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + config_key='root-agent', + version=1, + model_name='gpt-4', + provider_name='openai', + context=context, + ) + child_tracker = LDAIConfigTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + config_key='child-agent', + version=1, + model_name='gpt-4', + provider_name='openai', + context=context, + ) + graph_tracker = AIGraphTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + graph_key='two-node-graph', + version=1, + context=context, + ) + + root_config = AIAgentConfig( + key='root-agent', + enabled=True, + model=ModelConfig(name='gpt-4', parameters={}), + provider=ProviderConfig(name='openai'), + instructions='You are root.', + tracker=root_tracker, + ) + child_config = AIAgentConfig( + key='child-agent', + enabled=True, + model=ModelConfig(name='gpt-4', parameters={}), + provider=ProviderConfig(name='openai'), + instructions='You are child.', + tracker=child_tracker, + ) + + edge = Edge(key='root-to-child', source_config='root-agent', target_config='child-agent') + graph_config = AIAgentGraphConfig( + key='two-node-graph', + root_config_key='root-agent', + edges=[edge], + enabled=True, + ) + + nodes = AgentGraphDefinition.build_nodes(graph_config, { + 'root-agent': root_config, + 'child-agent': child_config, + }) + return AgentGraphDefinition( + agent_graph=graph_config, + nodes=nodes, + context=context, + enabled=True, + tracker=graph_tracker, + ) + + # --------------------------------------------------------------------------- # Tests # --------------------------------------------------------------------------- @@ -258,3 +326,41 @@ async def test_tracks_failure_and_latency_on_model_error(): assert '$ld:ai:graph:invocation_failure' in ev assert '$ld:ai:graph:latency' in ev assert '$ld:ai:graph:invocation_success' not in ev + + +@pytest.mark.asyncio +async def test_multi_node_tracks_per_node_tokens_and_path(): + """Each node 
emits its own token events; path and graph total cover both nodes.""" + mock_ld_client = MagicMock() + graph = _make_two_node_graph(mock_ld_client) + + root_response = _make_fake_response('Root done.', input_tokens=10, output_tokens=5) + child_response = _make_fake_response('Child done.', input_tokens=3, output_tokens=2) + + def model_factory(node_config): + if node_config.key == 'root-agent': + return _mock_model(root_response) + return _mock_model(child_response) + + with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', + side_effect=model_factory): + runner = LangGraphAgentGraphRunner(graph, {}) + result = await runner.run('hello') + + assert result.metrics.success is True + + ev = _events(mock_ld_client) + + # Per-node token events identified by configKey + root_tokens = [(d, v) for d, v in ev.get('$ld:ai:tokens:total', []) if d.get('configKey') == 'root-agent'] + child_tokens = [(d, v) for d, v in ev.get('$ld:ai:tokens:total', []) if d.get('configKey') == 'child-agent'] + assert root_tokens[0][1] == 15 + assert child_tokens[0][1] == 5 + + # Graph-level total accumulates both nodes (10+3 in, 5+2 out) + assert ev['$ld:ai:graph:total_tokens'][0][1] == 20 + + # Execution path includes both node keys + path_data = ev['$ld:ai:graph:path'][0][0] + assert 'root-agent' in path_data['path'] + assert 'child-agent' in path_data['path'] diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 40d829bc..bc4b50a5 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -1,7 +1,8 @@ """OpenAI agent graph runner for LaunchDarkly AI SDK.""" +import re import time -from typing import Any, List, Optional +from typing import Any, Dict, List, Optional from ldai import log from ldai.agent_graph import AgentGraphDefinition, AgentGraphNode @@ -16,6 +17,11 @@ ) +def _sanitize_agent_name(key: str) -> str: + """Replace characters invalid for OpenAI function names with underscores.""" + return re.sub(r'[^a-zA-Z0-9_]', '_', key) + + class _RunState: """Mutable state shared across handoff and tool callbacks during a single run.""" @@ -44,6 +50,7 @@ def __init__(self, graph: AgentGraphDefinition, tools: ToolRegistry): """ self._graph = graph self._tools = tools + self._agent_name_map: Dict[str, str] = {} async def run(self, input: Any) -> AgentGraphResult: """ @@ -132,6 +139,7 @@ def _build_agents(self, path: List[str], state: _RunState) -> Any: ) from exc tracker = self._graph.get_tracker() + name_map: Dict[str, str] = {} def build_node(node: AgentGraphNode, ctx: dict) -> Any: node_config = node.get_config() @@ -142,6 +150,8 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: raise ValueError(f"Model not set for node '{node_config.key}'") tool_defs = model.get_parameter('tools') or [] + sanitized_name = _sanitize_agent_name(node_config.key) + name_map[sanitized_name] = node_config.key # --- handoffs --- agent_handoffs: List[Handoff] = [] @@ -173,14 +183,16 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: agent_tools.append(function_tool(tool_fn)) return Agent( - name=node_config.key, + name=sanitized_name, model=model.name, instructions=f'{RECOMMENDED_PROMPT_PREFIX} {node_config.instructions or ""}', handoffs=list(agent_handoffs), tools=list(agent_tools), ) - return self._graph.reverse_traverse(fn=build_node) + 
root = self._graph.reverse_traverse(fn=build_node) + self._agent_name_map = name_map + return root def _make_on_handoff( self, @@ -269,7 +281,8 @@ def _track_tool_calls(self, result: Any, tracker: Any) -> None: """Track all tool calls from the run result, attributed to the node that called them.""" gk = tracker.graph_key if tracker is not None else None for agent_name, tool_name in get_tool_calls_from_run_items(result.new_items): - node = self._graph.get_node(agent_name) + original_key = self._agent_name_map.get(agent_name, agent_name) + node = self._graph.get_node(original_key) if node is None: continue config_tracker = node.get_config().tracker diff --git a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py index e8cbc913..59ffed5c 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py +++ b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py @@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, MagicMock, patch from ldai.agent_graph import AgentGraphDefinition -from ldai.models import AIAgentGraphConfig, AIAgentConfig, ModelConfig, ProviderConfig +from ldai.models import AIAgentGraphConfig, AIAgentConfig, Edge, ModelConfig, ProviderConfig from ldai.tracker import AIGraphTracker, LDAIConfigTracker from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner @@ -153,6 +153,74 @@ def _make_agents_modules(run_result: MagicMock) -> dict: } +def _make_two_node_graph(mock_ld_client: MagicMock) -> AgentGraphDefinition: + """Build a two-node AgentGraphDefinition (root-agent → child-agent).""" + context = MagicMock() + + root_tracker = LDAIConfigTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + config_key='root-agent', + version=1, + model_name='gpt-4', + provider_name='openai', + context=context, + ) + child_tracker = LDAIConfigTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + config_key='child-agent', + version=1, + model_name='gpt-4', + provider_name='openai', + context=context, + ) + graph_tracker = AIGraphTracker( + ld_client=mock_ld_client, + variation_key='test-variation', + graph_key='two-node-graph', + version=1, + context=context, + ) + + root_config = AIAgentConfig( + key='root-agent', + enabled=True, + model=ModelConfig(name='gpt-4', parameters={}), + provider=ProviderConfig(name='openai'), + instructions='You are root.', + tracker=root_tracker, + ) + child_config = AIAgentConfig( + key='child-agent', + enabled=True, + model=ModelConfig(name='gpt-4', parameters={}), + provider=ProviderConfig(name='openai'), + instructions='You are child.', + tracker=child_tracker, + ) + + edge = Edge(key='root-to-child', source_config='root-agent', target_config='child-agent') + graph_config = AIAgentGraphConfig( + key='two-node-graph', + root_config_key='root-agent', + edges=[edge], + enabled=True, + ) + + nodes = AgentGraphDefinition.build_nodes(graph_config, { + 'root-agent': root_config, + 'child-agent': child_config, + }) + return AgentGraphDefinition( + agent_graph=graph_config, + nodes=nodes, + context=context, + enabled=True, + tracker=graph_tracker, + ) + + def _events(mock_ld_client: MagicMock) -> dict: """Return dict of event_name -> list of (data, value) from all track() calls.""" result = defaultdict(list) @@ -303,3 +371,89 @@ async def test_tracks_failure_and_latency_on_runner_error(): assert '$ld:ai:graph:invocation_failure' in ev assert '$ld:ai:graph:latency' in ev 
assert '$ld:ai:graph:invocation_success' not in ev + + +@pytest.mark.asyncio +async def test_multi_node_tracks_per_node_tokens_and_handoff(): + """Each node emits its own token events; handoff event fires between them.""" + mock_ld_client = MagicMock() + graph = _make_two_node_graph(mock_ld_client) + + root_entry = MagicMock() + root_entry.total_tokens = 15 + root_entry.input_tokens = 10 + root_entry.output_tokens = 5 + + child_entry = MagicMock() + child_entry.total_tokens = 9 + child_entry.input_tokens = 6 + child_entry.output_tokens = 3 + + run_result = MagicMock() + run_result.final_output = 'child answer' + run_result.new_items = [] + run_result.usage = None + run_result.context_wrapper.usage.total_tokens = 24 + run_result.context_wrapper.usage.input_tokens = 16 + run_result.context_wrapper.usage.output_tokens = 8 + run_result.context_wrapper.usage.request_usage_entries = [root_entry, child_entry] + + on_handoff_callbacks = [] + + def capture_handoff(**kwargs): + cb = kwargs.get('on_handoff') + if cb: + on_handoff_callbacks.append(cb) + return MagicMock() + + async def mock_run(agent, input_str, **kwargs): + # Simulate the root→child handoff before returning + if on_handoff_callbacks: + run_ctx = MagicMock() + run_ctx.usage.request_usage_entries = [root_entry] + on_handoff_callbacks[0](run_ctx) + return run_result + + mock_runner_cls = MagicMock() + mock_runner_cls.run = mock_run + + mock_agents = MagicMock() + mock_agents.Runner = mock_runner_cls + mock_agents.Agent = MagicMock(return_value=MagicMock()) + mock_agents.Handoff = MagicMock() + mock_agents.Tool = MagicMock() + mock_agents.function_tool = lambda fn: MagicMock() + mock_agents.handoff = capture_handoff + + mock_ext = MagicMock() + mock_ext.RECOMMENDED_PROMPT_PREFIX = '[PREFIX]' + + with patch.dict('sys.modules', { + 'agents': mock_agents, + 'agents.extensions': MagicMock(), + 'agents.extensions.handoff_prompt': mock_ext, + 'agents.tool_context': MagicMock(), + }): + runner = OpenAIAgentGraphRunner(graph, {}) + result = await runner.run('hello') + + assert result.metrics.success is True + + ev = _events(mock_ld_client) + + # Per-node token events identified by configKey + root_tokens = [(d, v) for d, v in ev.get('$ld:ai:tokens:total', []) if d.get('configKey') == 'root-agent'] + child_tokens = [(d, v) for d, v in ev.get('$ld:ai:tokens:total', []) if d.get('configKey') == 'child-agent'] + assert root_tokens[0][1] == 15 + assert child_tokens[0][1] == 9 + + # Execution path includes both node keys + path_data = ev['$ld:ai:graph:path'][0][0] + assert 'root-agent' in path_data['path'] + assert 'child-agent' in path_data['path'] + + # Handoff event fires with correct source and target + handoff_events = ev.get('$ld:ai:graph:handoff_success', []) + assert len(handoff_events) == 1 + assert handoff_events[0][0]['sourceKey'] == 'root-agent' + assert handoff_events[0][0]['targetKey'] == 'child-agent' From c9cc0a959adf205195dfbd2765c836431ec8a792 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 1 Apr 2026 14:11:03 -0500 Subject: [PATCH 11/20] fix sending proper tool name in openai --- .../tests/test_tracking_langgraph.py | 38 ++++++++++++++++--- .../ldai_openai/openai_agent_graph_runner.py | 8 +++- 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py index 7fc9ab62..042de16e 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py +++ 
b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py @@ -254,12 +254,23 @@ async def test_tracks_tool_calls(): """A tool_call event fires for each tool name found in the model response.""" mock_ld_client = MagicMock() graph = _make_graph(mock_ld_client, tool_names=['get_weather']) - fake_response = _make_fake_response('Calling tool.', tool_call_names=['get_weather']) - tool_registry = {'get_weather': lambda location='NYC': 'sunny'} + # Model returns a tool call on the first invoke, then a final answer. + tool_response = _make_fake_response('Calling tool.', tool_call_names=['get_weather']) + final_response = _make_fake_response('It is sunny in NYC.') + + mock_model = MagicMock() + mock_model.invoke.side_effect = [tool_response, final_response] + mock_model.bind_tools.return_value = mock_model + + def get_weather(location: str = 'NYC') -> str: + """Return the current weather for a location.""" + return 'sunny' + + tool_registry = {'get_weather': get_weather} with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', - return_value=_mock_model(fake_response)): + return_value=mock_model): runner = LangGraphAgentGraphRunner(graph, tool_registry) await runner.run('What is the weather?') @@ -274,12 +285,27 @@ async def test_tracks_multiple_tool_calls(): """One tool_call event fires per tool name in the response.""" mock_ld_client = MagicMock() graph = _make_graph(mock_ld_client, tool_names=['search', 'summarize']) - fake_response = _make_fake_response('Done.', tool_call_names=['search', 'summarize']) - tool_registry = {'search': lambda q='': q, 'summarize': lambda text='': text} + # Both tools called in one response; second invoke returns a final answer. + tool_response = _make_fake_response('Done.', tool_call_names=['search', 'summarize']) + final_response = _make_fake_response('Here is the summary.') + + mock_model = MagicMock() + mock_model.invoke.side_effect = [tool_response, final_response] + mock_model.bind_tools.return_value = mock_model + + def search(q: str = '') -> str: + """Search for information.""" + return q + + def summarize(text: str = '') -> str: + """Summarize the given text.""" + return text + + tool_registry = {'search': search, 'summarize': summarize} with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', - return_value=_mock_model(fake_response)): + return_value=mock_model): runner = LangGraphAgentGraphRunner(graph, tool_registry) await runner.run('Search and summarize.') diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index bc4b50a5..1d9fb96b 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -51,6 +51,7 @@ def __init__(self, graph: AgentGraphDefinition, tools: ToolRegistry): self._graph = graph self._tools = tools self._agent_name_map: Dict[str, str] = {} + self._tool_name_map: Dict[str, str] = {} async def run(self, input: Any) -> AgentGraphResult: """ @@ -140,6 +141,7 @@ def _build_agents(self, path: List[str], state: _RunState) -> Any: tracker = self._graph.get_tracker() name_map: Dict[str, str] = {} + tool_name_map: Dict[str, str] = {} def build_node(node: AgentGraphNode, ctx: dict) -> Any: node_config = node.get_config() @@ -180,6 +182,8 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: if not tool_fn: continue + # Map 
fn.__name__ → config key so tracked names match the AI config. + tool_name_map[tool_fn.__name__] = tool_name agent_tools.append(function_tool(tool_fn)) return Agent( @@ -192,6 +196,7 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: root = self._graph.reverse_traverse(fn=build_node) self._agent_name_map = name_map + self._tool_name_map = tool_name_map return root def _make_on_handoff( @@ -280,8 +285,9 @@ def _flush_final_segment( def _track_tool_calls(self, result: Any, tracker: Any) -> None: """Track all tool calls from the run result, attributed to the node that called them.""" gk = tracker.graph_key if tracker is not None else None - for agent_name, tool_name in get_tool_calls_from_run_items(result.new_items): + for agent_name, tool_fn_name in get_tool_calls_from_run_items(result.new_items): original_key = self._agent_name_map.get(agent_name, agent_name) + tool_name = self._tool_name_map.get(tool_fn_name, '') node = self._graph.get_node(original_key) if node is None: continue From 45ac2820f769ea70ae77a0561cb8e58046a3ec80 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 1 Apr 2026 14:34:28 -0500 Subject: [PATCH 12/20] attempt to fix openai native tool calls --- .../ldai_openai/openai_agent_graph_runner.py | 19 ++++--- .../src/ldai_openai/openai_agent_runner.py | 6 +- .../src/ldai_openai/openai_helper.py | 55 +++++++++++++++++++ 3 files changed, 69 insertions(+), 11 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 1d9fb96b..ea514700 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -14,6 +14,8 @@ extract_usage_from_request_entry, get_ai_usage_from_response, get_tool_calls_from_run_items, + is_agent_tool_instance, + registry_value_to_agent_tool, ) @@ -46,7 +48,7 @@ def __init__(self, graph: AgentGraphDefinition, tools: ToolRegistry): Initialize the runner. :param graph: The AgentGraphDefinition to execute - :param tools: Registry mapping tool names to callables + :param tools: Registry mapping tool names to callables or native ``Tool`` instances """ self._graph = graph self._tools = tools @@ -128,8 +130,6 @@ def _build_agents(self, path: List[str], state: _RunState) -> Any: from agents import ( Agent, Handoff, - Tool, - function_tool, handoff, ) from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX @@ -174,7 +174,7 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: ) # --- tools --- - agent_tools: List[Tool] = [] + agent_tools: List[Any] = [] for tool_def in tool_defs: tool_name = tool_def.get('name', '') @@ -182,9 +182,13 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: if not tool_fn: continue - # Map fn.__name__ → config key so tracked names match the AI config. - tool_name_map[tool_fn.__name__] = tool_name - agent_tools.append(function_tool(tool_fn)) + # Map runtime tool name → LD config key for metrics (function __name__ + # for callables; identity for native tool instances — see get_tool_calls_from_run_items). 
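+            # (Assumed example: WebSearchTool().name is 'web_search', while its run
+            # items surface with raw_item type 'web_search_call'; hence the '_call' suffix below.)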
+ if is_agent_tool_instance(tool_fn): + tool_name_map[f'{tool_fn.name}_call'] = tool_name + else: + tool_name_map[tool_fn.__name__] = tool_name + agent_tools.append(registry_value_to_agent_tool(tool_fn)) return Agent( name=sanitized_name, @@ -286,6 +290,7 @@ def _track_tool_calls(self, result: Any, tracker: Any) -> None: """Track all tool calls from the run result, attributed to the node that called them.""" gk = tracker.graph_key if tracker is not None else None for agent_name, tool_fn_name in get_tool_calls_from_run_items(result.new_items): + log.info(f"Tracking tool call: agent_name={agent_name}, tool_fn_name={tool_fn_name}") original_key = self._agent_name_map.get(agent_name, agent_name) tool_name = self._tool_name_map.get(tool_fn_name, '') node = self._graph.get_node(original_key) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index bed1cbaa..1525fc93 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -6,7 +6,7 @@ from ldai.providers import AgentResult, AgentRunner, ToolRegistry from ldai.providers.types import LDAIMetrics -from ldai_openai.openai_helper import get_ai_usage_from_response +from ldai_openai.openai_helper import get_ai_usage_from_response, registry_value_to_agent_tool class OpenAIAgentRunner(AgentRunner): @@ -81,8 +81,6 @@ async def run(self, input: Any) -> AgentResult: def _build_agent_tools(self) -> List[Any]: """Build tool instances from LD tool definitions and registry.""" - from agents import function_tool - tools = [] for td in self._tool_definitions: if not isinstance(td, dict): @@ -93,7 +91,7 @@ def _build_agent_tools(self) -> List[Any]: tool_fn = self._tools.get(name) if tool_fn: - tools.append(function_tool(tool_fn)) + tools.append(registry_value_to_agent_tool(tool_fn)) continue log.warning( diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index 3ab89b46..7078a813 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -1,3 +1,4 @@ +import typing from typing import Any, Dict, Iterable, List, Optional, Tuple, cast from ldai import LDMessage @@ -113,6 +114,60 @@ def normalize_tool_types(tool_definitions: List[Any]) -> List[Dict[str, Any]]: 'web_search': 'web_search_tool', } +# ``agents.Tool`` is a typing.Union of concrete tool classes, not a runtime class. +# Using ``isinstance(x, Tool)`` raises TypeError (subscripted generics / union checks). 
+_AGENT_TOOL_TYPES: Optional[Tuple[type, ...]] = None + + +def _concrete_agent_tool_types() -> Tuple[type, ...]: + """Resolve concrete classes behind ``agents.Tool`` (a Union alias).""" + try: + from agents import Tool as ToolUnion + except ImportError: + return () + args = typing.get_args(ToolUnion) + if not args: + return () + out: List[type] = [] + for a in args: + origin = getattr(a, '__origin__', None) + if origin is not None and isinstance(origin, type): + out.append(origin) + elif isinstance(a, type): + out.append(a) + return tuple(out) + + +def is_agent_tool_instance(value: Any) -> bool: + """True if ``value`` is already an openai-agents tool object (not a plain callable).""" + global _AGENT_TOOL_TYPES + if _AGENT_TOOL_TYPES is None: + _AGENT_TOOL_TYPES = _concrete_agent_tool_types() + if not _AGENT_TOOL_TYPES: + return False + return isinstance(value, _AGENT_TOOL_TYPES) + + +def registry_value_to_agent_tool(value: Any) -> Any: + """ + Turn a ToolRegistry value into an object the OpenAI Agents SDK accepts in ``Agent(tools=…)``. + + Plain callables are wrapped with ``function_tool``. Values that are already + tool instances (e.g. ``WebSearchTool()``, ``FileSearchTool(...)``) are + returned unchanged so they are not double-wrapped. + """ + try: + from agents import function_tool + except ImportError as exc: + raise ImportError( + "openai-agents is required for agent tools. " + "Install it with: pip install openai-agents" + ) from exc + + if is_agent_tool_instance(value): + return value + return function_tool(value) + def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]]: """ From 51f3ba61370f8578fde8eb2f09da33c7b46798c9 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 1 Apr 2026 16:50:51 -0500 Subject: [PATCH 13/20] fix openai tool call tracking and lint issues --- .../ldai_langchain/langchain_agent_runner.py | 5 +- .../src/ldai_langchain/langchain_helper.py | 2 +- .../ldai_openai/openai_agent_graph_runner.py | 11 ++-- .../src/ldai_openai/openai_agent_runner.py | 5 +- .../src/ldai_openai/openai_helper.py | 56 +++++-------------- .../tests/test_tracking_openai_agents.py | 44 +++++++++++++-- .../src/ldai/providers/runner_factory.py | 2 +- 7 files changed, 68 insertions(+), 57 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py index bb637714..8144dd36 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py @@ -6,7 +6,10 @@ from ldai.providers import AgentResult, AgentRunner from ldai.providers.types import LDAIMetrics -from ldai_langchain.langchain_helper import extract_last_message_content, sum_token_usage_from_messages +from ldai_langchain.langchain_helper import ( + extract_last_message_content, + sum_token_usage_from_messages, +) class LangChainAgentRunner(AgentRunner): diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py index b1b18c8f..7915c9f8 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional +from typing import Any, Dict, List, Optional, Union from 
langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import AIMessage, HumanMessage, SystemMessage diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index ea514700..d93cc1a6 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -185,7 +185,7 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: # Map runtime tool name → LD config key for metrics (function __name__ # for callables; identity for native tool instances — see get_tool_calls_from_run_items). if is_agent_tool_instance(tool_fn): - tool_name_map[f'{tool_fn.name}_call'] = tool_name + tool_name_map[tool_fn.name] = tool_name else: tool_name_map[tool_fn.__name__] = tool_name agent_tools.append(registry_value_to_agent_tool(tool_fn)) @@ -290,10 +290,11 @@ def _track_tool_calls(self, result: Any, tracker: Any) -> None: """Track all tool calls from the run result, attributed to the node that called them.""" gk = tracker.graph_key if tracker is not None else None for agent_name, tool_fn_name in get_tool_calls_from_run_items(result.new_items): - log.info(f"Tracking tool call: agent_name={agent_name}, tool_fn_name={tool_fn_name}") - original_key = self._agent_name_map.get(agent_name, agent_name) - tool_name = self._tool_name_map.get(tool_fn_name, '') - node = self._graph.get_node(original_key) + agent_key = self._agent_name_map.get(agent_name, agent_name) + tool_name = self._tool_name_map.get(tool_fn_name) + if tool_name is None: + continue + node = self._graph.get_node(agent_key) if node is None: continue config_tracker = node.get_config().tracker diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 1525fc93..4906a66e 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -6,7 +6,10 @@ from ldai.providers import AgentResult, AgentRunner, ToolRegistry from ldai.providers.types import LDAIMetrics -from ldai_openai.openai_helper import get_ai_usage_from_response, registry_value_to_agent_tool +from ldai_openai.openai_helper import ( + get_ai_usage_from_response, + registry_value_to_agent_tool, +) class OpenAIAgentRunner(AgentRunner): diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index 7078a813..05fc6649 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -1,4 +1,3 @@ -import typing from typing import Any, Dict, Iterable, List, Optional, Tuple, cast from ldai import LDMessage @@ -7,7 +6,6 @@ from openai.types.chat import ChatCompletionMessageParam - def convert_messages_to_openai(messages: List[LDMessage]) -> Iterable[ChatCompletionMessageParam]: """ Convert LaunchDarkly messages to OpenAI chat completion message format. @@ -109,43 +107,9 @@ def normalize_tool_types(tool_definitions: List[Any]) -> List[Dict[str, Any]]: return result -# Native tool raw_item type names don't always match the LD config key convention. 
-_NATIVE_TOOL_TYPE_TO_CONFIG_KEY = { - 'web_search': 'web_search_tool', -} - -# ``agents.Tool`` is a typing.Union of concrete tool classes, not a runtime class. -# Using ``isinstance(x, Tool)`` raises TypeError (subscripted generics / union checks). -_AGENT_TOOL_TYPES: Optional[Tuple[type, ...]] = None - - -def _concrete_agent_tool_types() -> Tuple[type, ...]: - """Resolve concrete classes behind ``agents.Tool`` (a Union alias).""" - try: - from agents import Tool as ToolUnion - except ImportError: - return () - args = typing.get_args(ToolUnion) - if not args: - return () - out: List[type] = [] - for a in args: - origin = getattr(a, '__origin__', None) - if origin is not None and isinstance(origin, type): - out.append(origin) - elif isinstance(a, type): - out.append(a) - return tuple(out) - - def is_agent_tool_instance(value: Any) -> bool: """True if ``value`` is already an openai-agents tool object (not a plain callable).""" - global _AGENT_TOOL_TYPES - if _AGENT_TOOL_TYPES is None: - _AGENT_TOOL_TYPES = _concrete_agent_tool_types() - if not _AGENT_TOOL_TYPES: - return False - return isinstance(value, _AGENT_TOOL_TYPES) + return not callable(value) def registry_value_to_agent_tool(value: Any) -> Any: @@ -156,6 +120,8 @@ def registry_value_to_agent_tool(value: Any) -> Any: tool instances (e.g. ``WebSearchTool()``, ``FileSearchTool(...)``) are returned unchanged so they are not double-wrapped. """ + if is_agent_tool_instance(value): + return value try: from agents import function_tool except ImportError as exc: @@ -163,12 +129,18 @@ def registry_value_to_agent_tool(value: Any) -> Any: "openai-agents is required for agent tools. " "Install it with: pip install openai-agents" ) from exc - - if is_agent_tool_instance(value): - return value return function_tool(value) +# Native tool response types do not match the SDK or LD tool name; this map aligns them. +# Function tools are omitted—they already arrive as ``ResponseFunctionToolCall.name``. +_RESPONSE_TYPE_TO_TOOL_NAME: Dict[str, str] = { + 'web_search_call': 'web_search', + 'file_search_call': 'file_search', + 'code_interpreter_call': 'code_interpreter', +} + + def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]]: """ Extract (agent_name, tool_name) pairs from RunResult.new_items. 
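Taken together, these helpers let one ToolRegistry hold plain callables and pre-built native tool instances side by side. A minimal sketch of the intended flow, assuming the optional openai-agents dependency is installed; get_weather is an illustrative stand-in, not part of this change:

    from agents import WebSearchTool

    from ldai_openai.openai_helper import (
        is_agent_tool_instance,
        registry_value_to_agent_tool,
    )

    def get_weather(location: str = 'NYC') -> str:
        """Illustrative registry callable."""
        return 'sunny'

    # Plain callables are not tool instances, so they get wrapped.
    assert not is_agent_tool_instance(get_weather)
    weather_tool = registry_value_to_agent_tool(get_weather)  # FunctionTool via function_tool

    # Pre-built native tools pass through unchanged, avoiding double-wrapping.
    search = WebSearchTool()
    assert is_agent_tool_instance(search)
    assert registry_value_to_agent_tool(search) is search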
@@ -197,9 +169,9 @@ def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]] tool_name = raw.name else: raw_type = getattr(raw, 'type', None) or (raw.get('type') if isinstance(raw, dict) else None) - if not raw_type: + if not isinstance(raw_type, str): continue - tool_name = _NATIVE_TOOL_TYPE_TO_CONFIG_KEY.get(raw_type, raw_type) + tool_name = _RESPONSE_TYPE_TO_TOOL_NAME.get(raw_type, raw_type) if tool_name: result.append((agent_name, tool_name)) return result diff --git a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py index 59ffed5c..931ee59f 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py +++ b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py @@ -109,6 +109,19 @@ def _make_run_result( return result +def _tool_registry(*config_names: str) -> dict: + """Registry entries whose callable __name__ matches runtime tool names from the SDK.""" + + def _stub(name: str): + def fn(): + pass + + fn.__name__ = name + return fn + + return {n: _stub(n) for n in config_names} + + def _make_tool_call_item(agent_name: str, tool_name: str) -> MagicMock: """ Create a mock ToolCallItem with a ResponseFunctionToolCall raw item so that @@ -300,15 +313,15 @@ async def test_tracks_graph_key_on_node_events(): @pytest.mark.asyncio async def test_tracks_tool_calls_from_run_items(): - """A tool_call event fires for each tool found in RunResult.new_items.""" + """A tool_call event fires for tools registered on the graph and in the tool registry.""" mock_ld_client = MagicMock() - graph = _make_graph(mock_ld_client, node_key='root-agent') + graph = _make_graph(mock_ld_client, node_key='root-agent', tool_names=['get_weather']) tool_item = _make_tool_call_item('root-agent', 'get_weather') run_result = _make_run_result(output='done', tool_call_items=[tool_item]) with patch.dict('sys.modules', _make_agents_modules(run_result)): - runner = OpenAIAgentGraphRunner(graph, {}) + runner = OpenAIAgentGraphRunner(graph, _tool_registry('get_weather')) await runner.run('What is the weather?') ev = _events(mock_ld_client) @@ -319,9 +332,11 @@ async def test_tracks_tool_calls_from_run_items(): @pytest.mark.asyncio async def test_tracks_multiple_tool_calls(): - """One tool_call event fires per tool in RunResult.new_items.""" + """One tool_call event fires per registered tool in RunResult.new_items.""" mock_ld_client = MagicMock() - graph = _make_graph(mock_ld_client, node_key='root-agent') + graph = _make_graph( + mock_ld_client, node_key='root-agent', tool_names=['search', 'summarize'] + ) items = [ _make_tool_call_item('root-agent', 'search'), @@ -330,7 +345,7 @@ async def test_tracks_multiple_tool_calls(): run_result = _make_run_result(output='done', tool_call_items=items) with patch.dict('sys.modules', _make_agents_modules(run_result)): - runner = OpenAIAgentGraphRunner(graph, {}) + runner = OpenAIAgentGraphRunner(graph, _tool_registry('search', 'summarize')) await runner.run('Search and summarize.') ev = _events(mock_ld_client) @@ -338,6 +353,23 @@ async def test_tracks_multiple_tool_calls(): assert sorted(tool_keys) == ['search', 'summarize'] +@pytest.mark.asyncio +async def test_does_not_track_tool_calls_without_graph_and_registry_config(): + """RunResult tool items that are not backed by graph + registry tools are ignored.""" + mock_ld_client = MagicMock() + graph = _make_graph(mock_ld_client, node_key='root-agent') + + tool_item = 
_make_tool_call_item('root-agent', 'orphan_tool') + run_result = _make_run_result(output='done', tool_call_items=[tool_item]) + + with patch.dict('sys.modules', _make_agents_modules(run_result)): + runner = OpenAIAgentGraphRunner(graph, {}) + await runner.run('prompt') + + ev = _events(mock_ld_client) + assert ev.get('$ld:ai:tool_call', []) == [] + + @pytest.mark.asyncio async def test_tracks_failure_and_latency_on_runner_error(): """When Runner.run raises, failure and latency events fire; success does not.""" diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index 589896bd..eecf47bb 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -3,9 +3,9 @@ from ldai import log from ldai.models import AIConfigKind -from ldai.providers.ai_provider import AIProvider from ldai.providers.agent_graph_runner import AgentGraphRunner from ldai.providers.agent_runner import AgentRunner +from ldai.providers.ai_provider import AIProvider from ldai.providers.model_runner import ModelRunner T = TypeVar('T') From 289728857ea97e9ff995cea059b1df8c6f31a0b1 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Apr 2026 08:51:47 -0500 Subject: [PATCH 14/20] add experimental label --- .../server-ai-langchain/pyproject.toml | 4 +- .../ldai_langchain/langchain_agent_runner.py | 7 +- .../langchain_runner_factory.py | 50 ++++++++------ .../ldai_openai/openai_agent_graph_runner.py | 7 +- .../src/ldai_openai/openai_agent_runner.py | 7 +- .../src/ldai_openai/openai_runner_factory.py | 68 +++++++++++-------- packages/sdk/server-ai/src/ldai/client.py | 10 +++ .../src/ldai/providers/agent_graph_runner.py | 5 ++ .../src/ldai/providers/agent_runner.py | 5 ++ .../src/ldai/providers/ai_provider.py | 10 +++ .../src/ldai/providers/runner_factory.py | 10 +++ 11 files changed, 126 insertions(+), 57 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index f2469ac9..abb128a7 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ ] [project.optional-dependencies] -graph = ["langgraph>=0.1.0"] +graph = ["langgraph>=1.0.0"] [project.urls] Homepage = "https://docs.launchdarkly.com/sdk/ai/python" @@ -39,7 +39,7 @@ dev = [ "mypy==1.18.2", "pycodestyle>=2.11.0", "isort>=5.12.0", - "langgraph>=0.1.0", + "langgraph>=1.0.0", ] [build-system] diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py index 8144dd36..7b799782 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py @@ -1,5 +1,3 @@ -"""LangChain agent runner for LaunchDarkly AI SDK.""" - from typing import Any from ldai import log @@ -14,6 +12,11 @@ class LangChainAgentRunner(AgentRunner): """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + AgentRunner implementation for LangChain. 
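For reviewers weighing the experimental label, a usage sketch of the entry point being annotated. This is a hypothetical flow, not a confirmed SDK surface: the no-argument factory constructor and the ``result.output`` attribute are assumptions (the tests in this series read ``.output`` on graph results), and fetching ``agent_config`` from LaunchDarkly is elided because it is outside this patch:

    from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory

    def get_weather(location: str) -> str:
        """Illustrative tool callable for the registry."""
        return 'sunny'

    async def answer(agent_config) -> str:
        # agent_config: an AI agent config already resolved from LaunchDarkly.
        factory = LangChainRunnerFactory()  # assumed no-argument constructor
        runner = factory.create_agent(agent_config, tools={'get_weather': get_weather})
        result = await runner.run("What's the weather in NYC?")
        return result.output  # assumed AgentResult field, mirroring AgentGraphResult
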
Wraps a compiled LangChain agent graph (from ``langchain.agents.create_agent``) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index 6bea7ed0..52cc9a44 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -14,8 +14,38 @@ class LangChainRunnerFactory(AIProvider): """LangChain ``AIProvider`` implementation for the LaunchDarkly AI SDK.""" + def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> LangChainAgentRunner: + """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + + Create a configured LangChainAgentRunner for the given AI agent config. + + :param config: The LaunchDarkly AI agent configuration + :param tools: ToolRegistry mapping tool names to callables + :return: LangChainAgentRunner ready to run the agent + """ + from langchain.agents import create_agent as lc_create_agent + instructions = (config.instructions or '') if hasattr(config, 'instructions') else '' + llm = create_langchain_model(config) + lc_tools = build_tools(config, tools or {}) + + agent = lc_create_agent( + llm, + tools=lc_tools or None, + system_prompt=instructions or None, + ) + return LangChainAgentRunner(agent) + def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Create a configured LangGraphAgentGraphRunner for the given graph definition. :param graph_def: The AgentGraphDefinition to execute @@ -36,23 +66,3 @@ def create_model(self, config: AIConfigKind) -> LangChainModelRunner: """ llm = create_langchain_model(config) return LangChainModelRunner(llm) - - def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> LangChainAgentRunner: - """ - Create a configured LangChainAgentRunner for the given AI agent config. 
- - :param config: The LaunchDarkly AI agent configuration - :param tools: ToolRegistry mapping tool names to callables - :return: LangChainAgentRunner ready to run the agent - """ - from langchain.agents import create_agent as lc_create_agent - instructions = (config.instructions or '') if hasattr(config, 'instructions') else '' - llm = create_langchain_model(config) - lc_tools = build_tools(config, tools or {}) - - agent = lc_create_agent( - llm, - tools=lc_tools or None, - system_prompt=instructions or None, - ) - return LangChainAgentRunner(agent) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index d93cc1a6..387d4939 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -1,5 +1,3 @@ -"""OpenAI agent graph runner for LaunchDarkly AI SDK.""" - import re import time from typing import Any, Dict, List, Optional @@ -34,6 +32,11 @@ def __init__(self, last_handoff_ns: int, last_node_key: str) -> None: class OpenAIAgentGraphRunner(AgentGraphRunner): """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + AgentGraphRunner implementation for the OpenAI Agents SDK. Runs the agent graph with the OpenAI Agents SDK and automatically records diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 4906a66e..216d4dd7 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -1,5 +1,3 @@ -"""OpenAI agent runner for LaunchDarkly AI SDK.""" - from typing import Any, Dict, List from ldai import log @@ -14,6 +12,11 @@ class OpenAIAgentRunner(AgentRunner): """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + AgentRunner implementation for OpenAI. Executes a single agent using the OpenAI Agents SDK (``openai-agents``). diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index 8d563d33..f3517d92 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -36,37 +36,13 @@ def _extract_model_config(self, config: AIConfigKind) -> tuple: model_dict = config_dict.get('model') or {} return model_dict.get('name', ''), model_dict.get('parameters') or {} - def create_model(self, config: AIConfigKind) -> OpenAIModelRunner: - """ - Create a configured OpenAIModelRunner for the given AI config. - - Reuses the underlying AsyncOpenAI client so connection pooling is preserved. - Tool definitions are converted from LD's flat format to the Chat Completions - API format, with native tools mapped to their correct API type. 
- - :param config: The LaunchDarkly AI configuration - :return: OpenAIModelRunner ready to invoke the model - """ - model_name, parameters = self._extract_model_config(config) - parameters = dict(parameters) - tool_defs = parameters.pop('tools', None) or [] - if tool_defs: - parameters['tools'] = normalize_tool_types(tool_defs) - return OpenAIModelRunner(self._client, model_name, parameters) - - def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: - """ - Create a configured OpenAIAgentGraphRunner for the given graph definition. - - :param graph_def: The AgentGraphDefinition to execute - :param tools: Registry mapping tool names to callables - :return: OpenAIAgentGraphRunner ready to execute the graph - """ - from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner - return OpenAIAgentGraphRunner(graph_def, tools) - def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> 'OpenAIAgentRunner': """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Create a configured OpenAIAgentRunner for the given AI agent config. :param config: The LaunchDarkly AI agent configuration @@ -88,6 +64,40 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> 'Op tools or {}, ) + def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: + """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + + Create a configured OpenAIAgentGraphRunner for the given graph definition. + + :param graph_def: The AgentGraphDefinition to execute + :param tools: Registry mapping tool names to callables + :return: OpenAIAgentGraphRunner ready to execute the graph + """ + from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner + return OpenAIAgentGraphRunner(graph_def, tools) + + def create_model(self, config: AIConfigKind) -> OpenAIModelRunner: + """ + Create a configured OpenAIModelRunner for the given AI config. + + Reuses the underlying AsyncOpenAI client so connection pooling is preserved. + Tool definitions are converted from LD's flat format to the Chat Completions + API format, with native tools mapped to their correct API type. + + :param config: The LaunchDarkly AI configuration + :return: OpenAIModelRunner ready to invoke the model + """ + model_name, parameters = self._extract_model_config(config) + parameters = dict(parameters) + tool_defs = parameters.pop('tools', None) or [] + if tool_defs: + parameters['tools'] = normalize_tool_types(tool_defs) + return OpenAIModelRunner(self._client, model_name, parameters) + def get_client(self) -> AsyncOpenAI: """ Return the underlying AsyncOpenAI client. diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index cdc14c42..9555d63e 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -390,6 +390,11 @@ async def create_agent( default_ai_provider: Optional[str] = None, ) -> Optional[ManagedAgent]: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Creates and returns a new ManagedAgent for AI agent invocations. 
:param key: The key identifying the AI agent configuration to use @@ -675,6 +680,11 @@ async def create_agent_graph( default_ai_provider: Optional[str] = None, ) -> Optional[ManagedAgentGraph]: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Creates and returns a new ManagedAgentGraph for AI agent graph execution. Resolves the graph configuration via ``agent_graph()``, creates a diff --git a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py index a7bdefee..3de29c37 100644 --- a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py +++ b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py @@ -6,6 +6,11 @@ @runtime_checkable class AgentGraphRunner(Protocol): """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Runtime capability interface for multi-agent graph execution. An AgentGraphRunner is a focused, configured object returned by diff --git a/packages/sdk/server-ai/src/ldai/providers/agent_runner.py b/packages/sdk/server-ai/src/ldai/providers/agent_runner.py index d0bcc883..1ada6a2a 100644 --- a/packages/sdk/server-ai/src/ldai/providers/agent_runner.py +++ b/packages/sdk/server-ai/src/ldai/providers/agent_runner.py @@ -6,6 +6,11 @@ @runtime_checkable class AgentRunner(Protocol): """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Runtime capability interface for single-agent execution. An AgentRunner is a focused, configured object returned by diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index 171c50cc..66382d12 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -75,6 +75,11 @@ def create_model(self, config: Any) -> Optional[Any]: def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> Optional[Any]: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Create a configured agent executor for the given AI config and tool registry. Default implementation warns. Provider implementations should override this method. @@ -88,6 +93,11 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> Opt def create_agent_graph(self, graph_def: Any, tools: Any) -> Optional[Any]: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Create a configured agent graph executor for the given graph definition and tools. Default implementation warns. Provider implementations should override this method. 
diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index eecf47bb..ed28c691 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -137,6 +137,11 @@ def create_agent( default_ai_provider: Optional[str] = None, ) -> Optional[AgentRunner]: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Create an agent executor for the given AI agent config and tool registry. :param config: LaunchDarkly AI agent config @@ -155,6 +160,11 @@ def create_agent_graph( default_ai_provider: Optional[str] = None, ) -> Optional[AgentGraphRunner]: """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + Create an agent graph executor for the given graph definition and tool registry. :param graph_def: AgentGraphDefinition instance From 4a975dd0e33134707d337772c1ce93ff5bebc5c3 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Apr 2026 08:54:56 -0500 Subject: [PATCH 15/20] missing label --- .../src/ldai_langchain/langgraph_agent_graph_runner.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index b86ad6e8..442a84d8 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -1,5 +1,3 @@ -"""LangGraph agent graph runner for LaunchDarkly AI SDK.""" - import operator import time from typing import Annotated, Any, List @@ -22,6 +20,11 @@ class LangGraphAgentGraphRunner(AgentGraphRunner): """ + CAUTION: + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards + compatibility guarantees. + AgentGraphRunner implementation for LangGraph. Compiles and runs the agent graph with LangGraph and automatically records From 8d990352824cec68230d409c2444a9ba291f5509 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Apr 2026 08:59:49 -0500 Subject: [PATCH 16/20] fix lint --- .../src/ldai_langchain/langchain_agent_runner.py | 4 ++-- .../src/ldai_langchain/langchain_runner_factory.py | 8 ++++---- .../src/ldai_langchain/langgraph_agent_graph_runner.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py index 7b799782..1969ec75 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py @@ -13,8 +13,8 @@ class LangChainAgentRunner(AgentRunner): """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. 
- It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. AgentRunner implementation for LangChain. diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index 52cc9a44..61c85b3d 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -17,8 +17,8 @@ class LangChainRunnerFactory(AIProvider): def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> LangChainAgentRunner: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create a configured LangChainAgentRunner for the given AI agent config. @@ -42,8 +42,8 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> Lan def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create a configured LangGraphAgentGraphRunner for the given graph definition. diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index 442a84d8..7554b2fa 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -21,8 +21,8 @@ class LangGraphAgentGraphRunner(AgentGraphRunner): """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. AgentGraphRunner implementation for LangGraph. 
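A minimal usage sketch of the experimental LangChain surface these labels guard. Only the LangChainRunnerFactory.create_agent(config, tools) signature comes from the diffs above; `agent_config` and the `lookup_order` tool are hypothetical stand-ins:

    # Hedged sketch; `agent_config` is a hypothetical LD AI agent config
    # resolved elsewhere, not something constructed in these patches.
    from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory

    def build_order_agent(agent_config):
        def lookup_order(order_id: str) -> str:
            # Hypothetical tool callable, registered under its LD config key.
            return f"order {order_id}: shipped"

        factory = LangChainRunnerFactory()
        return factory.create_agent(agent_config, tools={'lookup_order': lookup_order})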
From 700083cd4002e5fda43cc98fdf1443a151136ca5 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Apr 2026 09:09:45 -0500 Subject: [PATCH 17/20] more lint fixes --- .../src/ldai_openai/openai_agent_graph_runner.py | 4 ++-- .../src/ldai_openai/openai_agent_runner.py | 4 ++-- .../src/ldai_openai/openai_runner_factory.py | 8 ++++---- packages/sdk/server-ai/src/ldai/client.py | 8 ++++---- .../server-ai/src/ldai/providers/agent_graph_runner.py | 4 ++-- packages/sdk/server-ai/src/ldai/providers/agent_runner.py | 4 ++-- packages/sdk/server-ai/src/ldai/providers/ai_provider.py | 8 ++++---- .../sdk/server-ai/src/ldai/providers/runner_factory.py | 8 ++++---- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 387d4939..9729dbb6 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -33,8 +33,8 @@ def __init__(self, last_handoff_ns: int, last_node_key: str) -> None: class OpenAIAgentGraphRunner(AgentGraphRunner): """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. AgentGraphRunner implementation for the OpenAI Agents SDK. diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 216d4dd7..7e79c836 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -13,8 +13,8 @@ class OpenAIAgentRunner(AgentRunner): """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. AgentRunner implementation for OpenAI. diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index f3517d92..223160c0 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -39,8 +39,8 @@ def _extract_model_config(self, config: AIConfigKind) -> tuple: def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> 'OpenAIAgentRunner': """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create a configured OpenAIAgentRunner for the given AI agent config. 
@@ -67,8 +67,8 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> 'Op def create_agent_graph(self, graph_def: Any, tools: ToolRegistry) -> Any: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create a configured OpenAIAgentGraphRunner for the given graph definition. diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index 9555d63e..9c87ee8a 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -391,8 +391,8 @@ async def create_agent( ) -> Optional[ManagedAgent]: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Creates and returns a new ManagedAgent for AI agent invocations. @@ -681,8 +681,8 @@ async def create_agent_graph( ) -> Optional[ManagedAgentGraph]: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Creates and returns a new ManagedAgentGraph for AI agent graph execution. diff --git a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py index 3de29c37..6cc45670 100644 --- a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py +++ b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py @@ -7,8 +7,8 @@ class AgentGraphRunner(Protocol): """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Runtime capability interface for multi-agent graph execution. diff --git a/packages/sdk/server-ai/src/ldai/providers/agent_runner.py b/packages/sdk/server-ai/src/ldai/providers/agent_runner.py index 1ada6a2a..cba7e156 100644 --- a/packages/sdk/server-ai/src/ldai/providers/agent_runner.py +++ b/packages/sdk/server-ai/src/ldai/providers/agent_runner.py @@ -7,8 +7,8 @@ class AgentRunner(Protocol): """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Runtime capability interface for single-agent execution. 
diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index 66382d12..70acbe06 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -76,8 +76,8 @@ def create_model(self, config: Any) -> Optional[Any]: def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> Optional[Any]: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create a configured agent executor for the given AI config and tool registry. @@ -94,8 +94,8 @@ def create_agent(self, config: Any, tools: Optional[ToolRegistry] = None) -> Opt def create_agent_graph(self, graph_def: Any, tools: Any) -> Optional[Any]: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create a configured agent graph executor for the given graph definition and tools. diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index ed28c691..4c28334a 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -138,8 +138,8 @@ def create_agent( ) -> Optional[AgentRunner]: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create an agent executor for the given AI agent config and tool registry. @@ -161,8 +161,8 @@ def create_agent_graph( ) -> Optional[AgentGraphRunner]: """ CAUTION: - This feature is experimental and should NOT be considered ready for production use. - It may change or be removed without notice and is not subject to backwards + This feature is experimental and should NOT be considered ready for production use. + It may change or be removed without notice and is not subject to backwards compatibility guarantees. Create an agent graph executor for the given graph definition and tool registry. 
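The OpenAI side reads the same way. A hedged sketch of the experimental graph surface: the OpenAIAgentGraphRunner(graph_def, tools) constructor and run(...) appear in the diffs and tests above, while `graph_def` and the `search` tool are hypothetical stand-ins:

    # Hedged sketch; `graph_def` is a hypothetical AgentGraphDefinition
    # resolved elsewhere.
    from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner

    async def run_search_graph(graph_def):
        def search(query: str) -> str:
            # Hypothetical registry callable, keyed by its LD config tool name.
            return f"results for {query!r}"

        runner = OpenAIAgentGraphRunner(graph_def, {'search': search})
        return await runner.run('Search and summarize.')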
From f46150109929f762418676d7abd91c4fd9da887f Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Apr 2026 09:53:44 -0500 Subject: [PATCH 18/20] address feedback --- .../src/ldai_langchain/langchain_helper.py | 6 +++--- .../server-ai-openai/src/ldai_openai/openai_helper.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py index 7915c9f8..796070e0 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_helper.py @@ -172,12 +172,12 @@ def extract_last_message_content(messages: List[Any]) -> str: Extract the string content of the last message in a list. :param messages: List of LangChain message objects - :return: String content of the last message, or empty string if none + :return: String content of the last message, or empty string if none or content is not a str """ if messages: last = messages[-1] - if hasattr(last, 'content'): - return str(last.content) + if hasattr(last, 'content') and isinstance(last.content, str): + return last.content return '' diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index 05fc6649..d8e06927 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -81,9 +81,9 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: # Tool names that require their own API type in the Chat Completions API. # LD stores all tools as type="function"; these are converted to their correct type. _NATIVE_API_TOOL_NAMES = frozenset({ - 'web_search_tool', + 'web_search', 'file_search', - 'computer_use_preview', + 'tool_search', }) From 35c2eecd2a32c292efb5b84ea2cde4591abd8434 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 2 Apr 2026 10:09:13 -0500 Subject: [PATCH 19/20] adjust how we set tool type --- .../src/ldai_openai/openai_helper.py | 17 ++++++++++------- .../src/ldai_openai/openai_runner_factory.py | 4 ++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py index d8e06927..a6daa6bd 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py @@ -78,8 +78,7 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: return LDAIMetrics(success=True, usage=get_ai_usage_from_response(response)) -# Tool names that require their own API type in the Chat Completions API. -# LD stores all tools as type="function"; these are converted to their correct type. +# Tool ``name`` values that map to OpenAI hosted-tool ``type`` (same string as ``name``). _NATIVE_API_TOOL_NAMES = frozenset({ 'web_search', 'file_search', @@ -89,11 +88,12 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: def normalize_tool_types(tool_definitions: List[Any]) -> List[Dict[str, Any]]: """ - Convert LD tool definitions to Chat Completions API format. + Set ``type`` on LD tool definitions for OpenAI hosted tools. - LD emits all tools as ``type="function"`` with a flat structure. 
This helper
-    wraps regular function tools in the nested ``function`` key the API requires,
-    and converts known native tool names to their correct API type without a schema.
+    When ``name`` is a known OpenAI hosted tool (e.g. ``file_search``), ``type`` is set
+    to that name; all other keys are left unchanged (``vector_store_ids``, etc.
+    come from the LD config). Other tools are passed through unchanged; non-dict
+    entries are skipped.
 
     :param tool_definitions: Tool definitions from the LD AI config
     :return: Tool list ready to pass to ``chat.completions.create``
@@ -103,7 +103,10 @@ def normalize_tool_types(tool_definitions: List[Any]) -> List[Dict[str, Any]]:
         if not isinstance(td, dict):
             continue
         name = td.get('name', '')
-        result.append({**td, 'type': name} if name in _NATIVE_API_TOOL_NAMES else td)
+        if name in _NATIVE_API_TOOL_NAMES:
+            result.append({**td, 'type': name})
+        else:
+            result.append(td)
     return result
 
 
diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py
index 223160c0..a653d1f0 100644
--- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py
+++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py
@@ -85,8 +85,8 @@ def create_model(self, config: AIConfigKind) -> OpenAIModelRunner:
         Create a configured OpenAIModelRunner for the given AI config.
 
         Reuses the underlying AsyncOpenAI client so connection pooling is preserved.
-        Tool definitions are converted from LD's flat format to the Chat Completions
-        API format, with native tools mapped to their correct API type.
+        Hosted tool definitions have ``type`` adjusted from LD's placeholder when
+        needed; all other fields are passed through from the config.
 
         :param config: The LaunchDarkly AI configuration
         :return: OpenAIModelRunner ready to invoke the model

From 4a459ee0619aeb5c999b6af29956aaeecbe14516 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Thu, 2 Apr 2026 10:25:25 -0500
Subject: [PATCH 20/20] simplify hosted tool names

---
 .../src/ldai_openai/openai_helper.py          | 24 ++++++++-----------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py
index a6daa6bd..08da7028 100644
--- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py
+++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_helper.py
@@ -78,10 +78,12 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics:
     return LDAIMetrics(success=True, usage=get_ai_usage_from_response(response))
 
 
-# Tool ``name`` values that map to OpenAI hosted-tool ``type`` (same string as ``name``).
-_NATIVE_API_TOOL_NAMES = frozenset({
+# Canonical names for OpenAI hosted tools (LD config / Chat Completions ``type``).
+# Agent run items use ``raw_item.type`` with a ``_call`` suffix (e.g. ``web_search_call``).
+_OPENAI_HOSTED_TOOL_NAMES = frozenset({ 'web_search', 'file_search', + 'code_interpreter', 'tool_search', }) @@ -103,7 +105,7 @@ def normalize_tool_types(tool_definitions: List[Any]) -> List[Dict[str, Any]]: if not isinstance(td, dict): continue name = td.get('name', '') - if name in _NATIVE_API_TOOL_NAMES: + if name in _OPENAI_HOSTED_TOOL_NAMES: result.append({**td, 'type': name}) else: result.append(td) @@ -135,19 +137,9 @@ def registry_value_to_agent_tool(value: Any) -> Any: return function_tool(value) -# Native tool response types do not match the SDK or LD tool name; this map aligns them. -# Function tools are omitted—they already arrive as ``ResponseFunctionToolCall.name``. -_RESPONSE_TYPE_TO_TOOL_NAME: Dict[str, str] = { - 'web_search_call': 'web_search', - 'file_search_call': 'file_search', - 'code_interpreter_call': 'code_interpreter', -} - - def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]]: """ Extract (agent_name, tool_name) pairs from RunResult.new_items. - Covers both custom FunctionTools (tracked by their config key) and native hosted tools (web search, file search, code interpreter, image generation). @@ -174,7 +166,11 @@ def get_tool_calls_from_run_items(new_items: List[Any]) -> List[Tuple[str, str]] raw_type = getattr(raw, 'type', None) or (raw.get('type') if isinstance(raw, dict) else None) if not isinstance(raw_type, str): continue - tool_name = _RESPONSE_TYPE_TO_TOOL_NAME.get(raw_type, raw_type) + if raw_type.endswith('_call'): + base = raw_type.removesuffix('_call') + tool_name = base if base in _OPENAI_HOSTED_TOOL_NAMES else raw_type + else: + tool_name = raw_type if tool_name: result.append((agent_name, tool_name)) return result
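Taken together, patches 19 and 20 leave the hosted-tool handling small enough to sanity-check in isolation. A standalone mirror for illustration only (the patched openai_helper.py is the source of truth; removesuffix requires Python 3.9+):

    HOSTED = frozenset({'web_search', 'file_search', 'code_interpreter', 'tool_search'})

    def normalize_tool_types(tool_definitions):
        # Hosted tools get ``type`` set to their name; other dicts pass
        # through unchanged; non-dict entries are skipped.
        result = []
        for td in tool_definitions:
            if not isinstance(td, dict):
                continue
            name = td.get('name', '')
            if name in HOSTED:
                result.append({**td, 'type': name})
            else:
                result.append(td)
        return result

    def tool_name_from_raw_type(raw_type: str) -> str:
        # Run items report hosted tools as ``<name>_call``; strip the suffix
        # only when the base is a known hosted tool.
        if raw_type.endswith('_call'):
            base = raw_type.removesuffix('_call')
            return base if base in HOSTED else raw_type
        return raw_type

    assert normalize_tool_types([{'name': 'file_search'}, 'skipped'])[0]['type'] == 'file_search'
    assert tool_name_from_raw_type('web_search_call') == 'web_search'
    assert tool_name_from_raw_type('transfer_call') == 'transfer_call'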