2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "uipath-langchain"
-version = "0.0.138"
+version = "0.0.139"
 description = "UiPath Langchain"
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.10"
108 changes: 61 additions & 47 deletions src/uipath_langchain/_cli/cli_eval.py
@@ -1,4 +1,5 @@
 import asyncio
+import traceback
 from typing import List, Optional
 
 from openinference.instrumentation.langchain import (
@@ -17,7 +18,8 @@
 from uipath.eval._helpers import auto_discover_entrypoint
 
 from uipath_langchain._cli._runtime._context import LangGraphRuntimeContext
-from uipath_langchain._cli._runtime._runtime import LangGraphScriptRuntime
+from uipath_langchain._cli._runtime._graph_resolver import LangGraphJsonResolverContext
+from uipath_langchain._cli._runtime._runtime import LangGraphRuntime
 from uipath_langchain._cli._utils._graph import LangGraphConfig
 from uipath_langchain._tracing import (
     LangChainExporter,
@@ -35,62 +37,74 @@ def langgraph_eval_middleware(
         )  # Continue with normal flow if no langgraph.json
 
     try:
-        _instrument_traceable_attributes()
 
-        event_bus = EventBus()
-
-        if kwargs.get("register_progress_reporter", False):
-            progress_reporter = StudioWebProgressReporter(
-                spans_exporter=LangChainExporter()
-            )
-            asyncio.run(progress_reporter.subscribe_to_eval_runtime_events(event_bus))
-        console_reporter = ConsoleProgressReporter()
-        asyncio.run(console_reporter.subscribe_to_eval_runtime_events(event_bus))
-
-        def generate_runtime_context(
-            context_entrypoint: str, **context_kwargs
-        ) -> LangGraphRuntimeContext:
-            context = LangGraphRuntimeContext.with_defaults(**context_kwargs)
-            context.entrypoint = context_entrypoint
-            return context
+        async def execute():
+            _instrument_traceable_attributes()
 
-        runtime_entrypoint = entrypoint or auto_discover_entrypoint()
+            event_bus = EventBus()
 
-        eval_context = UiPathEvalContext.with_defaults(
-            entrypoint=runtime_entrypoint, **kwargs
-        )
-        eval_context.eval_set = eval_set or EvalHelpers.auto_discover_eval_set()
-        eval_context.eval_ids = eval_ids
-
-        def generate_runtime(ctx: LangGraphRuntimeContext) -> LangGraphScriptRuntime:
-            return LangGraphScriptRuntime(ctx, ctx.entrypoint)
-
-        runtime_factory = UiPathRuntimeFactory(
-            LangGraphScriptRuntime,
-            LangGraphRuntimeContext,
-            context_generator=lambda **context_kwargs: generate_runtime_context(
-                context_entrypoint=runtime_entrypoint,
-                **context_kwargs,
-            ),
-            runtime_generator=generate_runtime,
-        )
+            if kwargs.get("register_progress_reporter", False):
+                progress_reporter = StudioWebProgressReporter(
+                    spans_exporter=LangChainExporter()
+                )
+                await progress_reporter.subscribe_to_eval_runtime_events(event_bus)
 
-        if eval_context.job_id:
-            runtime_factory.add_span_exporter(LangChainExporter())
+            console_reporter = ConsoleProgressReporter()
+            await console_reporter.subscribe_to_eval_runtime_events(event_bus)
 
-        runtime_factory.add_instrumentor(LangChainInstrumentor, get_current_span)
+            runtime_entrypoint = entrypoint or auto_discover_entrypoint()
 
-        async def execute():
-            async with UiPathEvalRuntime.from_eval_context(
-                factory=runtime_factory, context=eval_context, event_bus=event_bus
-            ) as eval_runtime:
-                await eval_runtime.execute()
-                await event_bus.wait_for_all()
+            eval_context = UiPathEvalContext.with_defaults(
+                entrypoint=runtime_entrypoint, **kwargs
+            )
+            eval_context.eval_set = eval_set or EvalHelpers.auto_discover_eval_set()
+            eval_context.eval_ids = eval_ids
 
+            async with LangGraphJsonResolverContext(
+                entrypoint=runtime_entrypoint
+            ) as resolver:
+
+                def generate_runtime_context(
+                    context_entrypoint: str, **context_kwargs
+                ) -> LangGraphRuntimeContext:
+                    context = LangGraphRuntimeContext.with_defaults(**context_kwargs)
+                    context.entrypoint = context_entrypoint
+                    return context
+
+                def generate_runtime(
+                    ctx: LangGraphRuntimeContext,
+                ) -> LangGraphRuntime:
+                    return LangGraphRuntime(ctx, resolver)
+
+                runtime_factory = UiPathRuntimeFactory(
+                    LangGraphRuntime,
+                    LangGraphRuntimeContext,
+                    context_generator=lambda **context_kwargs: generate_runtime_context(
+                        context_entrypoint=runtime_entrypoint,
+                        **context_kwargs,
+                    ),
+                    runtime_generator=generate_runtime,
+                )
+
+                if eval_context.job_id:
+                    runtime_factory.add_span_exporter(LangChainExporter())
+
+                runtime_factory.add_instrumentor(
+                    LangChainInstrumentor, get_current_span
+                )
+
+                async with UiPathEvalRuntime.from_eval_context(
+                    factory=runtime_factory, context=eval_context, event_bus=event_bus
+                ) as eval_runtime:
+                    await eval_runtime.execute()
+                    await event_bus.wait_for_all()
+
         asyncio.run(execute())
         return MiddlewareResult(should_continue=False)
 
     except Exception as e:
+        tb = traceback.format_exc()
         return MiddlewareResult(
-            should_continue=False, error_message=f"Error running evaluation: {str(e)}"
+            should_continue=False,
+            error_message=f"Error running evaluations: {repr(e)}\n{tb}",
         )
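
A note on the pattern this diff applies (not part of the change itself): the old middleware called asyncio.run() once per reporter subscription and once more for the evaluation run, each call spinning up its own event loop, while the new code keeps every await inside a single nested async def execute() and opens the graph resolver as an async context manager so the runtimes it creates can share that resolver. The sketch below illustrates the consolidation with generic asyncio only; open_resolver, subscribe, and run_evaluation are hypothetical placeholders, not UiPath or LangChain APIs.

```python
import asyncio
from contextlib import asynccontextmanager


@asynccontextmanager
async def open_resolver(entrypoint: str):
    # Hypothetical stand-in for LangGraphJsonResolverContext: acquire a
    # resource asynchronously, hand it to the caller, release it on exit.
    resolver = {"entrypoint": entrypoint}
    try:
        yield resolver
    finally:
        resolver.clear()


async def subscribe(name: str) -> None:
    # Stand-in for the reporters' subscribe_to_eval_runtime_events calls;
    # in the new code these are awaited directly instead of via asyncio.run.
    await asyncio.sleep(0)
    print(f"{name} subscribed")


def run_evaluation(entrypoint: str) -> None:
    # Mirror of the refactor: one nested coroutine owns every await,
    # and a single asyncio.run drives it on one event loop.
    async def execute() -> None:
        await subscribe("progress_reporter")
        await subscribe("console_reporter")
        async with open_resolver(entrypoint) as resolver:
            print(f"running evals against {resolver['entrypoint']}")

    asyncio.run(execute())


if __name__ == "__main__":
    run_evaluation("agent.py")
```

Because every coroutine now runs inside the one asyncio.run(execute()) call, objects bound to the loop (subscriptions, the event bus, the resolver) stay valid for the whole evaluation instead of being tied to loops that have already closed.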