V2 #11 (Closed)
5 changes: 3 additions & 2 deletions README.md
@@ -81,10 +81,11 @@ PRs welcome for:
- [ ] ~~Keep alive SSE connection until the user closes the browser tab (??)~~
- [ ] 🟡 Add a way to validate the user's access token (OAuth2)
- [ ] 🟡 Add evaluation metrics
- [ ] 🔴 Add *one more* abstraction layer so the agent can use different frameworks (LangGraph, LlamaIndex, etc.)
- [x] 🔴 Add *one more* abstraction layer so the agent can use different frameworks (LangGraph, LlamaIndex, etc.)
- [ ] 🟠 Add even more fucking abstractions to make it independent of observability tools (LangFuse, LangSmith, Grafana
Alloy, or whatever the fuck else)
- [ ] ⚪ Long-Term memory for each user. I want to add to chat application for real-time per thread prompt tuning - memory
- [ ] ⚪ Long-Term memory for each user. I want to add to chat application for real-time per thread prompt tuning -
memory
insights, response strategies, etc. But this is more about agent implementation not template core. Graph node as "
addon package?" LOL! https://i.imgur.com/k1jk3cx.png here we go again!
- [ ] ⚪ Guardrails ([LLMGuard implementation](https://github.com/assada/agent_template/tree/feat/guardrails) or handle
13 changes: 13 additions & 0 deletions agents/config/demo_agent.json
@@ -0,0 +1,13 @@
{
  "name": "demo_agent",
  "description": "Demo conversational agent with tool support",
  "production": {
    "framework": "langgraph",
    "class_name": "agents.langgraph.demo.demo_graph.DemoGraph",
    "prompt_source": "langfuse",
    "custom_settings": {
      "default_system_prompt": "You are a helpful AI assistant with access to tools.",
      "max_iterations": 10
    }
  }
}
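
This config is consumed by the template's agent registry; the "class_name" dotted path is imported and instantiated at startup. A minimal sketch of how such a file might be resolved (the load_agent_config helper and the returned tuple are illustrations, not the template's actual API):

import importlib
import json
from pathlib import Path
from typing import Any


def load_agent_config(config_path: str, environment: str = "production") -> tuple[type, dict[str, Any]]:
    # Hypothetical loader: resolve a config file into a class plus its settings.
    config = json.loads(Path(config_path).read_text())
    env_config = config[environment]

    # "agents.langgraph.demo.demo_graph.DemoGraph" -> module path + class name
    module_path, class_name = env_config["class_name"].rsplit(".", 1)
    agent_cls = getattr(importlib.import_module(module_path), class_name)

    # The real template presumably injects checkpointer/prompt_provider here as well.
    return agent_cls, env_config.get("custom_settings", {})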
24 changes: 24 additions & 0 deletions agents/config/demo_llamaindex_agent.json
@@ -0,0 +1,24 @@
{
  "name": "demo_llamaindex_agent",
  "description": "Demo LlamaIndex agent with weather tool support",
  "production": {
    "framework": "llamaindex",
    "class_name": "agents.llamaindex.demo.demo_agent.DemoLlamaIndexAgent",
    "prompt_source": "file",
    "custom_settings": {
      "default_system_prompt": "You are a helpful AI assistant with access to weather information. When users ask about weather, use the get_weather tool to provide accurate information.",
      "max_iterations": 10,
      "verbose": false
    }
  },
  "development": {
    "framework": "llamaindex",
    "class_name": "agents.llamaindex.demo.demo_agent.DemoLlamaIndexAgent",
    "prompt_source": "file",
    "custom_settings": {
      "default_system_prompt": "You are a helpful AI assistant with access to weather information. When users ask about weather, use the get_weather tool to provide accurate information.",
      "max_iterations": 5,
      "verbose": false
    }
  }
}
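
Unlike demo_agent.json, this config carries both production and development blocks for the same class, differing only in max_iterations. A resolver might select the block by environment and fall back to production (a sketch; the fallback rule is an assumption, not documented behavior):

from typing import Any


def resolve_environment(config: dict[str, Any], environment: str) -> dict[str, Any]:
    # Hypothetical: fall back to the production block when the requested env is absent.
    return config.get(environment, config["production"])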
File renamed without changes.
File renamed without changes.
Empty file added agents/langgraph/__init__.py
3 changes: 3 additions & 0 deletions agents/langgraph/demo/__init__.py
@@ -0,0 +1,3 @@
from .demo_graph import DemoGraph

__all__ = ["DemoGraph"]
@@ -2,28 +2,29 @@
from typing import Any, Literal

from langchain_core.messages import AIMessage

from app.agent.frameworks.langgraph_framework import Graph
from app.agent.frameworks.langgraph_framework.base_state import BaseState, State
from app.agent.prompt import PromptProvider
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.graph import START, StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.prebuilt import ToolNode

from app.agent.langgraph import Graph
from app.agent.langgraph.base_state import BaseState, State
from app.agent.langgraph.demo.tools.tools import TOOLS
from app.agent.prompt import PromptProvider
from .tools import TOOLS

logger = logging.getLogger(__name__)


class DemoGraph(Graph):
    def __init__(
        self, checkpointer: BaseCheckpointSaver[Any], prompt_provider: PromptProvider
        self, checkpointer: BaseCheckpointSaver[Any], prompt_provider: PromptProvider, custom_config: dict[str, Any] | None = None
    ):
        super().__init__(checkpointer, prompt_provider)

    @property
    def graph_name(self) -> str:
        return "demo_graph"
        return "demo_agent"

    def get_tools(self) -> list[Any]:
        return TOOLS
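The new custom_config parameter mirrors the custom_settings block in agents/config/demo_agent.json, but this hunk doesn't show it being stored. One plausible way to wire it up inside DemoGraph (a sketch, not the template's actual code):

    def __init__(
        self,
        checkpointer: BaseCheckpointSaver[Any],
        prompt_provider: PromptProvider,
        custom_config: dict[str, Any] | None = None,
    ):
        super().__init__(checkpointer, prompt_provider)
        # Hypothetical: keep the per-agent settings and read them with defaults.
        self.custom_config = custom_config or {}
        self.max_iterations = self.custom_config.get("max_iterations", 10)
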
5 changes: 5 additions & 0 deletions agents/langgraph/demo/tools/__init__.py
@@ -0,0 +1,5 @@
from .tools import TOOLS

__all__ = [
"TOOLS",
]
@@ -1,9 +1,8 @@
from collections.abc import Callable
from typing import Any

from langgraph.config import get_stream_writer

from app.agent.models import CustomUIMessage
from langgraph.config import get_stream_writer


async def get_weather(city: str) -> str:
@@ -24,7 +23,13 @@ async def get_weather(city: str) -> str:
        )
    )

    return f"It's always sunny in {city}!"
    import random

    weather_conditions = ["sunny", "cloudy", "rainy", "snowy", "partly cloudy"]
    temperature = random.randint(-5, 35)
    condition = random.choice(weather_conditions)

    return f"The weather in {city} is {condition} and {temperature}°C!"


TOOLS: list[Callable[..., Any]] = [get_weather]
Empty file added agents/llamaindex/.gitkeep
Empty file added agents/llamaindex/__init__.py
Empty file added agents/llamaindex/demo/__init__.py
203 changes: 203 additions & 0 deletions agents/llamaindex/demo/demo_agent.py
@@ -0,0 +1,203 @@
from __future__ import annotations

import asyncio
import json
import logging
from collections.abc import AsyncGenerator
from typing import Any

from llama_index.core.agent.workflow import BaseWorkflowAgent, ReActAgent
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.workflow import Event

from app.agent.prompt import Prompt, PromptProvider
from app.agent.services.events import EndEvent, ErrorEvent, TokenEvent
from app.agent.services.events.base_event import BaseEvent
from app.models import Thread, User

from .tools import TOOLS

logger = logging.getLogger(__name__)


class DemoLlamaIndexAgent:
    def __init__(
        self,
        prompt_provider: PromptProvider,
        custom_settings: dict[str, Any] | None = None,
    ):
        self.prompt_provider = prompt_provider
        self.custom_settings = custom_settings or {}
        self.agent: BaseWorkflowAgent | None = None

    def _get_model(self, prompt: Prompt) -> FunctionCallingLLM:
        from llama_index.llms.openai import OpenAI

        config = getattr(prompt, "config", {}) or {}
        model_config = config.get("model", "openai/gpt-4o-mini")

        # Model strings use a "provider/model" convention; bare names default to openai.
        if "/" in model_config:
            provider, model = model_config.split("/", 1)
        else:
            provider, model = "openai", model_config

        if provider == "openai":
            return OpenAI(
                model=model,
                temperature=config.get("temperature", 0.7),
                max_tokens=config.get("max_tokens", 4096),
                streaming=True,
            )
        else:
            raise ValueError(f"Unsupported provider: {provider}")

    def _get_prompt_fallback(self) -> Prompt:
        return Prompt(
            content="You are a helpful AI assistant with access to tools. Use the tools when needed to answer user questions.",
            config={
                "model": "openai/gpt-4o-mini",
                "temperature": 0.7,
                "max_tokens": 4096,
            },
        )

    def _initialize_agent(self) -> BaseWorkflowAgent:
        if self.agent is not None:
            return self.agent

        prompt = self.prompt_provider.get_prompt(
            "demo_llamaindex_agent", "production", self._get_prompt_fallback()
        )

        llm = self._get_model(prompt)

        self.agent = ReActAgent(
            tools=TOOLS,
            llm=llm,
            verbose=self.custom_settings.get("verbose", True),
            system_prompt=prompt.content,
        )

        return self.agent

    async def stream_response(
        self, message: str, thread: Thread, user: User
    ) -> AsyncGenerator[dict[str, Any]]:
        try:
            agent = self._initialize_agent()
            handler = agent.run(user_msg=message)

            async def get_result() -> Any:
                return await handler

            run_task = asyncio.create_task(get_result())

            streaming_content = ""
            has_content = False

            # Workflow events are streamed from the run handler, not the agent object.
            async for event in handler.stream_events():
                processed_event = self._process_workflow_event(event, thread, user)
                if processed_event:
                    if isinstance(processed_event, TokenEvent):
                        token_data = json.loads(processed_event.data)
                        streaming_content += token_data.get("content", "")
                        has_content = True

                    yield processed_event.model_dump()

            final_response = await run_task

            if not has_content and final_response:
                message_event = BaseEvent.from_payload(
                    event="message",
                    payload={
                        "type": "ai_message",
                        "content": str(final_response),
                        "metadata": {
                            "agent": "demo_llamaindex_agent",
                            "framework": "llamaindex",
                            "user_id": str(user.id),
                            "thread_id": str(thread.id),
                        },
                    },
                )
                yield message_event.model_dump()

        except Exception as e:
            logger.error(f"Error in LlamaIndex agent streaming: {e}")
            error_event = ErrorEvent(
                data=json.dumps({
                    "content": f"Error processing request: {str(e)}",
                    "error_type": type(e).__name__,
                })
            )
            yield error_event.model_dump()
        finally:
            end_event = EndEvent(data=json.dumps({"status": "completed"}))
            yield end_event.model_dump()

    def _process_workflow_event(self, event: Event, thread: Thread, user: User) -> BaseEvent | None:
        """Process workflow events and convert to our event format."""
        try:
            event_type = type(event).__name__

            if hasattr(event, "delta") and event.delta:
                return TokenEvent(
                    data=json.dumps({
                        "content": str(event.delta),
                        "metadata": {
                            "agent": "demo_llamaindex_agent",
                            "framework": "llamaindex",
                            "event_type": event_type,
                        },
                    })
                )

            elif hasattr(event, "content") and event.content:
                return BaseEvent.from_payload(
                    event="message",
                    payload={
                        "type": "ai_message",
                        "content": str(event.content),
                        "metadata": {
                            "agent": "demo_llamaindex_agent",
                            "framework": "llamaindex",
                            "event_type": event_type,
                            "user_id": str(user.id),
                            "thread_id": str(thread.id),
                        },
                    },
                )

            elif hasattr(event, "tool_calls") and event.tool_calls:
                return BaseEvent.from_payload(
                    event="tool_call",
                    payload={
                        "tool_calls": [str(call) for call in event.tool_calls],
                        "metadata": {
                            "agent": "demo_llamaindex_agent",
                            "framework": "llamaindex",
                            "event_type": event_type,
                        },
                    },
                )

            else:
                logger.debug(f"Workflow event {event_type}: {event}")
                return None

        except Exception as e:
            logger.warning(f"Error processing workflow event: {e}")
            return None

    async def load_history(
        self, thread: Thread, user: User
    ) -> AsyncGenerator[dict[str, Any]]:
        """Load conversation history."""
        # For demo purposes, return empty history
        # In real implementation, you'd load from agent's chat history
        logger.info(f"Loading history for thread {thread.id} with LlamaIndex agent")

        end_event = EndEvent(
            data=json.dumps({"status": "completed", "message": "No history available"})
        )
        yield end_event.model_dump()
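
As the comments note, the demo yields no history. If per-thread history were kept in a LlamaIndex ChatMemoryBuffer, load_history could replay it as message events; a sketch under that assumption (the buffer itself and the "human_message" type name are hypothetical):

from llama_index.core.memory import ChatMemoryBuffer


async def replay_history(memory: ChatMemoryBuffer, thread: Thread, user: User):
    # Hypothetical: convert a per-thread memory buffer into SSE message events.
    for msg in memory.get_all():
        yield BaseEvent.from_payload(
            event="message",
            payload={
                "type": "ai_message" if msg.role == "assistant" else "human_message",
                "content": msg.content or "",
                "metadata": {"thread_id": str(thread.id), "user_id": str(user.id)},
            },
        ).model_dump()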
32 changes: 32 additions & 0 deletions agents/llamaindex/demo/tools.py
@@ -0,0 +1,32 @@

from llama_index.core.tools import FunctionTool


async def get_weather(city: str) -> str:
    """Get weather for a given city.

    Args:
        city: The name of the city to get weather for

    Returns:
        Weather information for the city
    """
    # For demo purposes, return mock weather data
    # In real implementation, this would call a weather API
    import random

    weather_conditions = ["sunny", "cloudy", "rainy", "snowy", "partly cloudy"]
    temperature = random.randint(-5, 35)
    condition = random.choice(weather_conditions)

    return f"The weather in {city} is {condition} and {temperature}°C!"


def create_weather_tool() -> FunctionTool:
    return FunctionTool.from_defaults(
        fn=get_weather,
        name="get_weather",
        description="Get current weather information for a specific city",
    )


TOOLS = [create_weather_tool()]
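
The mock body above could be swapped for a live API call, as the comments suggest. A sketch against the free Open-Meteo endpoints (no API key required; the parameter and field names follow Open-Meteo's public docs, but treat the exact schema as an assumption):

import httpx


async def get_weather_live(city: str) -> str:
    # Hypothetical live version: geocode the city, then fetch current conditions.
    async with httpx.AsyncClient() as client:
        geo = (await client.get(
            "https://geocoding-api.open-meteo.com/v1/search",
            params={"name": city, "count": 1},
        )).json()
        if not geo.get("results"):
            return f"Could not find {city}."
        loc = geo["results"][0]
        forecast = (await client.get(
            "https://api.open-meteo.com/v1/forecast",
            params={
                "latitude": loc["latitude"],
                "longitude": loc["longitude"],
                "current_weather": True,
            },
        )).json()
        return f"The weather in {city} is {forecast['current_weather']['temperature']}°C."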
26 changes: 26 additions & 0 deletions agents/prompt/demo_agent.json
@@ -0,0 +1,26 @@
{
  "production": {
    "content": "You are a helpful AI assistant with access to tools. Use the tools when needed to provide accurate and helpful responses to user questions.",
    "config": {
      "model": "openai/gpt-4o-mini",
      "temperature": 0.7,
      "max_tokens": 4096
    }
  },
  "development": {
    "content": "You are a development AI assistant. You have access to tools and should use them when helpful. Keep responses concise for testing purposes.",
    "config": {
      "model": "openai/gpt-3.5-turbo",
      "temperature": 0.5,
      "max_tokens": 2048
    }
  },
  "staging": {
    "content": "You are a staging AI assistant with tool access. Provide thorough responses while testing new features.",
    "config": {
      "model": "openai/gpt-4o-mini",
      "temperature": 0.6,
      "max_tokens": 3072
    }
  }
}
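
With prompt_source set to "file" in the agent configs, a provider presumably reads this JSON and builds a Prompt per environment. A minimal sketch (the Prompt constructor matches its usage in _get_prompt_fallback above; the agents/prompt file layout is taken from this PR, the rest is assumption):

import json
from pathlib import Path

from app.agent.prompt import Prompt


def load_prompt(name: str, environment: str, fallback: Prompt) -> Prompt:
    # Hypothetical file-backed lookup mirroring PromptProvider.get_prompt(name, env, fallback).
    path = Path("agents/prompt") / f"{name}.json"
    if not path.exists():
        return fallback
    block = json.loads(path.read_text()).get(environment)
    if block is None:
        return fallback
    return Prompt(content=block["content"], config=block.get("config", {}))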