Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
486 changes: 486 additions & 0 deletions personal_assistant/README.md

Large diffs are not rendered by default.

14 changes: 14 additions & 0 deletions personal_assistant/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
"""Agent entry point for LangGraph deployment.

This file is referenced by langgraph.json and provides the graph for deployment.
It uses absolute imports to avoid relative import issues when loaded by LangGraph.

The for_deployment=True flag ensures we don't pass store/checkpointer to the graph,
allowing LangGraph platform to provide its own persistence infrastructure.
"""

from personal_assistant import create_email_assistant

# Export the graph for deployment
# Use for_deployment=True to let LangGraph platform provide store/checkpointer
graph = create_email_assistant(for_deployment=True)
11 changes: 11 additions & 0 deletions personal_assistant/langgraph.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
{
"dockerfile_lines": [],
"graphs": {
"personal_assistant": "./agent.py:graph"
},
"python_version": "3.11",
"env": ".env",
"dependencies": [
"."
]
}
34 changes: 34 additions & 0 deletions personal_assistant/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
[project]
name = "personal-assistant"
version = "0.1.0"
description = "Email assistant with HITL and memory using deepagents"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"deepagents",
"langchain-anthropic>=1.0.3",
"langgraph-cli[inmem]>=0.1.55",
"langgraph>=1.0.4",
"html2text>=2020.1.16",
"langchain>=1.1.0",
"rich>=10.0.0",
]

[project.optional-dependencies]
dev = [
"jupyter>=1.0.0",
"ipython>=8.0.0",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src/personal_assistant"]

[tool.uv]
dev-dependencies = [
"jupyter>=1.0.0",
"ipython>=8.0.0",
]
7 changes: 7 additions & 0 deletions personal_assistant/src/personal_assistant/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
"""Personal assistant package with HITL and memory using deepagents."""

from .email_assistant_deepagents import create_email_assistant

__version__ = "0.1.0"

__all__ = ["create_email_assistant"]
27 changes: 27 additions & 0 deletions personal_assistant/src/personal_assistant/configuration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
"""Define the configurable parameters for the agent."""

import os
from dataclasses import dataclass, fields
from typing import Any, Optional

from langchain_core.runnables import RunnableConfig

@dataclass(kw_only=True)
class Configuration:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we still need this for this example? Might be easier to remove and simplify

"""Placeholder for configuration."""

@classmethod
def from_runnable_config(
cls, config: Optional[RunnableConfig] = None
) -> "Configuration":
"""Create a Configuration instance from a RunnableConfig."""
configurable = (
config["configurable"] if config and "configurable" in config else {}
)
values: dict[str, Any] = {
f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
for f in fields(cls)
if f.init
}

return cls(**{k: v for k, v in values.items() if v})
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
"""Email assistant using deepagents library with custom HITL middleware.

This is the migration of email_assistant_hitl_memory.py to use the deepagents library's
create_deep_agent() pattern instead of manual graph construction. All functionality is
preserved including HITL logic, memory system, and custom tools.

The agent now handles triage directly through a tool call instead of a separate routing step.

Usage:
python -m examples.personal_assistant.email_assistant_deepagents
"""

from langchain_anthropic import ChatAnthropic
from langgraph.checkpoint.memory import MemorySaver
from langgraph.store.memory import InMemoryStore

from deepagents import create_deep_agent
from deepagents.backends import StoreBackend

from .middleware import MemoryInjectionMiddleware, PostInterruptMemoryMiddleware, GenUIMiddleware
from .schemas import EmailAssistantState
from .tools import get_tools
from .utils import format_email_markdown, parse_email, get_memory
from .prompts import agent_system_prompt_hitl_memory, default_user_profile

def create_email_assistant(for_deployment=False):
    """Build and configure the email assistant agent.

    Args:
        for_deployment: When True, omit store/checkpointer so the LangGraph
            platform can supply its own persistence infrastructure. When
            False, wire up an InMemoryStore and MemorySaver for local testing.

    Returns:
        CompiledStateGraph: the configured email assistant agent.
    """
    # Model used for all agent turns.
    llm = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)

    # Tool set — triage now happens via the triage_email tool call rather
    # than a separate routing step.
    assistant_tools = get_tools(
        [
            "triage_email",
            "write_email",
            "schedule_meeting",
            "check_calendar_availability",
            "Question",
            "Done",
        ]
    )

    # Persistence wiring depends on the deployment mode.
    store = InMemoryStore()
    if for_deployment:
        # In deployment, the LangGraph platform provides store and
        # checkpointer, so none are passed to create_deep_agent. Middleware
        # still needs *a* store during initialization; the InMemoryStore
        # above acts as a placeholder that the platform overrides.
        persistence_kwargs = {}
    else:
        # Local testing mode — own the store and checkpointer explicitly.
        persistence_kwargs = {"store": store, "checkpointer": MemorySaver()}

    # Interrupt configuration: each HITL tool pauses with a plain-text
    # description and an approve/reject decision.
    interrupt_on_config = {
        name: {"allowed_decisions": ["approve", "reject"], "description": desc}
        for name, desc in (
            (
                "write_email",
                "I've drafted an email response. Please review the content, recipients, and subject line below. Approve to send as-is, or Reject to cancel and end the workflow.",
            ),
            (
                "schedule_meeting",
                "I've prepared a calendar invitation. Please review the meeting details below. Approve to send the invite as-is, or Reject to cancel and end the workflow.",
            ),
            (
                "Question",
                "I need clarification before proceeding. Please review the question below and provide your response, or Reject to skip this action and end the workflow.",
            ),
        )
    }

    # Middleware stack: memory injection, post-interrupt memory updates, and
    # GenUI component mapping for tool-call rendering.
    middleware_stack = [
        MemoryInjectionMiddleware(store=store),
        PostInterruptMemoryMiddleware(store=store),
        GenUIMiddleware(
            tool_to_genui_map={
                "write_email": {"component_name": "write_email"},
                "schedule_meeting": {"component_name": "schedule_meeting"},
                "check_calendar_availability": {"component_name": "check_calendar_availability"},
                "Question": {"component_name": "question"},
            }
        ),
    ]

    # System prompt assembled from the tool descriptions and the default
    # user profile. (A memory-backed profile is accessible to middleware via
    # the store.)
    tool_lines = [f"- {tool.name}: {tool.description}" for tool in assistant_tools]
    system_prompt = agent_system_prompt_hitl_memory.format(
        tools_prompt="\n".join(tool_lines),
        user_profile=default_user_profile,
    )

    # Assemble the agent via the deepagents library, with built-in interrupt
    # handling supplied by interrupt_on.
    return create_deep_agent(
        model=llm,
        tools=assistant_tools,
        middleware=middleware_stack,
        backend=lambda rt: StoreBackend(rt),
        context_schema=EmailAssistantState,
        system_prompt=system_prompt,
        interrupt_on=interrupt_on_config,
        **persistence_kwargs,
    )


def main():
    """Run a small end-to-end example against the email assistant."""
    assistant = create_email_assistant()

    # A sample inbound email to exercise the workflow.
    sample_email = {
        "author": "jane@example.com",
        "to": "lance@langchain.dev",
        "subject": "Quick question about next week",
        "email_thread": "Hi Lance,\n\nCan we meet next Tuesday at 2pm to discuss the project roadmap?\n\nBest,\nJane",
    }

    # Render the email as markdown for the agent message.
    author, to, subject, email_thread = parse_email(sample_email)
    email_markdown = format_email_markdown(subject, author, to, email_thread)

    # Thread configuration for checkpointing.
    config = {"configurable": {"thread_id": "test-thread-1"}}

    banner = "=" * 80
    print(banner)
    print("EMAIL ASSISTANT EXAMPLE")
    print(banner)
    print("\nProcessing email:")
    print(email_markdown)
    print(banner)

    # The agent accepts the email as a simple user message string.
    result = assistant.invoke(
        {"messages": [{"role": "user", "content": email_markdown}]},
        config=config,
    )

    print("\nAgent result:")
    print(result)
    print(banner)
11 changes: 11 additions & 0 deletions personal_assistant/src/personal_assistant/middleware/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
"""Custom middleware for email assistant HITL workflow."""

from .email_memory_injection import MemoryInjectionMiddleware
from .email_post_interrupt import PostInterruptMemoryMiddleware
from .email_genui import GenUIMiddleware

__all__ = [
"MemoryInjectionMiddleware",
"PostInterruptMemoryMiddleware",
"GenUIMiddleware",
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
"""GenUI middleware for email assistant tool visualization."""

from typing import Annotated, Any, Sequence
from typing_extensions import TypedDict

from langchain.agents.middleware.types import AgentMiddleware, AgentState
from langgraph.graph.ui import AnyUIMessage, ui_message_reducer, push_ui_message
from langgraph.runtime import Runtime


class UIState(AgentState):
    """State schema with UI message support."""
    # Accumulated UI messages; ui_message_reducer merges pushed messages
    # across agent steps.
    ui: Annotated[Sequence[AnyUIMessage], ui_message_reducer]


class ToolGenUI(TypedDict):
    """Configuration for tool UI component mapping."""
    # Name of the UI component that renders this tool's call in the interface.
    component_name: str


class GenUIMiddleware(AgentMiddleware):
    """Middleware to push UI messages for tool calls.

    Runs after the model generates a response and, for every tool call whose
    name appears in ``tool_to_genui_map``, pushes a UI message naming the
    component that should render that call. This enables custom UI components
    to render tool calls in the interface.

    Args:
        tool_to_genui_map: Dict mapping tool names to UI component
            configurations (see ToolGenUI).
    """

    state_schema = UIState

    def __init__(self, tool_to_genui_map: dict[str, ToolGenUI]):
        # Initialize the base middleware so any parent-class setup runs.
        super().__init__()
        self.tool_to_genui_map = tool_to_genui_map

    def after_model(self, state: UIState, runtime: Runtime) -> dict[str, Any] | None:
        """Push UI messages for tool calls after model generation.

        Args:
            state: Agent state with messages
            runtime: Runtime context (unused here)

        Returns:
            None (UI messages are pushed via side effect)
        """
        messages = state.get("messages", [])
        if not messages:
            return None

        last_message = messages[-1]
        # Only AI messages carry tool calls.
        if last_message.type != "ai":
            return None

        # Defensive: tool_calls may be absent/empty on some message objects.
        for tool_call in getattr(last_message, "tool_calls", None) or []:
            genui = self.tool_to_genui_map.get(tool_call["name"])
            if genui is None:
                continue
            push_ui_message(
                genui["component_name"],
                {},
                metadata={
                    "tool_call_id": tool_call["id"]
                },
                message=last_message
            )
        return None
Loading