diff --git a/examples/openai-agents/README.md b/examples/openai-agents/README.md new file mode 100644 index 000000000..111c638b2 --- /dev/null +++ b/examples/openai-agents/README.md @@ -0,0 +1,181 @@ +# Honcho Memory Integration for the OpenAI Agents SDK + +Give your [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) agents persistent memory using [Honcho](https://honcho.dev). + +## Features + +- **Persistent Memory**: Every conversation turn is saved to Honcho and automatically injected into the agent's instructions on the next turn. +- **Natural Language Recall**: The agent can query Honcho's Dialectic API to answer questions like "What are my hobbies?" or "What did we talk about last time?" +- **Context Injection**: Conversation history is retrieved from Honcho and formatted for the LLM before every request via dynamic `instructions`. +- **Zero Boilerplate**: Pass a `HonchoContext` to `Runner.run()` — the tools and instructions handle the rest. + +## Installation + +```bash +pip install honcho-ai openai-agents python-dotenv +``` + +Or with uv: + +```bash +uv add honcho-ai openai-agents python-dotenv +``` + +## Environment Variables + +Create a `.env` file: + +```env +HONCHO_API_KEY=your-honcho-api-key +HONCHO_WORKSPACE_ID=default +OPENAI_API_KEY=your-openai-api-key +``` + +Get your Honcho API key at [honcho.dev](https://honcho.dev). + +## Quick Start + +```python +import asyncio +from agents import Agent, RunContextWrapper, Runner +from tools.client import HonchoContext +from tools.get_context import get_context +from tools.query_memory import query_memory +from tools.save_memory import save_memory + + +def honcho_instructions(ctx: RunContextWrapper[HonchoContext], agent: Agent) -> str: + base = "You are a helpful assistant with persistent memory powered by Honcho." 
+ history = get_context(ctx.context, tokens=2000) + if not history: + return base + formatted = "\n".join(f"{m['role'].title()}: {m['content']}" for m in history) + return f"{base}\n\n## Conversation History\n{formatted}" + + +agent = Agent[HonchoContext]( + name="HonchoMemoryAgent", + instructions=honcho_instructions, + tools=[query_memory], + model="gpt-4.1-mini", +) + + +async def chat(user_id: str, message: str, session_id: str) -> str: + ctx = HonchoContext(user_id=user_id, session_id=session_id) + save_memory(user_id, message, "user", session_id) + result = await Runner.run(agent, message, context=ctx) + response = str(result.final_output) + save_memory(user_id, response, "assistant", session_id) + return response + + +# Run a conversation turn +response = asyncio.run(chat("alice", "I love hiking in the mountains", "session-1")) +print(response) +``` + +Run the interactive demo: + +```bash +python main.py +``` + +## How It Works + +### 1. Dynamic Instructions + +The agent uses a callable `instructions` function instead of a static string. Before every LLM call, the SDK invokes this function with the current `RunContextWrapper`. The function calls `get_context()` to fetch recent messages from Honcho and injects them into the system prompt: + +``` +You are a helpful assistant with persistent memory powered by Honcho. + +## Conversation History +User: I love hiking +Assistant: That sounds wonderful! Do you have a favorite trail? +``` + +### 2. Memory Tools + +The `query_memory` tool is exposed to the LLM via `@function_tool`. When the user asks "What do you remember about me?", the agent calls this tool to query Honcho's Dialectic API — a semantic memory layer that synthesizes observations about the user into a natural language answer. + +### 3. Auto-Save + +The `chat()` helper in `main.py` wraps `Runner.run()` to save the user message before the run and the assistant response after. This keeps Honcho in sync with every conversation turn. 
+ +## API Reference + +### `HonchoContext` + +```python +@dataclass +class HonchoContext: + user_id: str # Unique identifier for the human peer + session_id: str # Identifier for the current conversation session + assistant_id: str # Peer ID for the assistant (default: "assistant") +``` + +Pass this as the `context` argument to `Runner.run()`. + +--- + +### `save_memory(user_id, content, role, session_id, assistant_id="assistant")` + +Saves a message to Honcho. Creates the peer and session if they don't exist. + +| Param | Type | Description | +|---|---|---| +| `user_id` | `str` | Unique user identifier | +| `content` | `str` | Message text | +| `role` | `str` | `"user"` or `"assistant"` | +| `session_id` | `str` | Session identifier | +| `assistant_id` | `str` | Peer ID for the assistant (default: `"assistant"`) | + +--- + +### `get_context(ctx, tokens=2000)` + +Returns recent conversation history from Honcho as OpenAI-format message dicts. + +| Param | Type | Description | +|---|---|---| +| `ctx` | `HonchoContext` | Context with user, session, and assistant IDs | +| `tokens` | `int` | Max tokens to include (default: `2000`) | + +Returns `list[dict[str, str]]` — suitable for direct use as LLM input. + +--- + +### `query_memory` (agent tool) + +A `@function_tool` decorated function the agent calls to query Honcho's Dialectic API. + +| Param | Type | Description | +|---|---|---| +| `ctx` | `RunContextWrapper[HonchoContext]` | Injected automatically by the SDK | +| `query` | `str` | Natural language question about the user | + +Returns a natural language answer from Honcho's memory. 
+ +## Concept Mapping + +| OpenAI Agents SDK | Honcho | +|---|---| +| `context.user_id` | Peer (human) | +| `context.assistant_id` | Peer (agent) | +| `context.session_id` | Session | +| `Runner.run()` input | Message | + +## Running Tests + +```bash +# Structural tests (no API keys required) +pytest tests/test_basic.py -v + +# Integration tests (requires HONCHO_API_KEY) +pytest tests/test_integration.py -v +``` + +## License + +AGPL-3.0-or-later diff --git a/examples/openai-agents/python/main.py b/examples/openai-agents/python/main.py new file mode 100644 index 000000000..5b59f50e4 --- /dev/null +++ b/examples/openai-agents/python/main.py @@ -0,0 +1,135 @@ +"""OpenAI Agents SDK integration with Honcho persistent memory. + +Demonstrates a conversational agent that remembers users across sessions. +Honcho stores every message and builds a long-term representation of the user; +the agent injects that context into its instructions on every turn and can +query memory on demand via the ``query_memory`` tool. + +Usage: + python main.py + +Environment variables: + HONCHO_API_KEY Required. Your Honcho API key from honcho.dev. + HONCHO_WORKSPACE_ID Optional. Workspace ID (default: "default"). + OPENAI_API_KEY Required. Your OpenAI API key. +""" + +import asyncio +import logging +import uuid + +from agents import Agent, RunContextWrapper, Runner +from honcho.http.exceptions import HonchoError + +from tools.client import HonchoContext, get_client +from tools.get_context import get_context +from tools.query_memory import query_memory +from tools.save_memory import save_memory + +logger = logging.getLogger(__name__) + + +def setup_session(user_id: str, session_id: str, assistant_id: str = "assistant") -> None: + """Register peers in the session once at startup. + + Should be called once before the conversation loop begins. Calling + ``add_peers`` on every turn is redundant — this ensures peers are + registered exactly once. 
+ + Args: + user_id: Unique identifier for the user peer. + session_id: Identifier for the conversation session. + assistant_id: Peer ID for the assistant. Defaults to ``"assistant"``. + + Raises: + RuntimeError: If the Honcho API call fails. + """ + try: + honcho = get_client() + user_peer = honcho.peer(user_id) + assistant_peer = honcho.peer(assistant_id) + session = honcho.session(session_id) + session.add_peers([user_peer, assistant_peer]) + except HonchoError as exc: + raise RuntimeError("Failed to initialize Honcho session peers") from exc + + +honcho_agent = Agent[HonchoContext]( + name="HonchoMemoryAgent", + instructions=( + "You are a helpful assistant with persistent memory powered by Honcho. " + "You remember users across conversations. " + "When a user asks what you remember about them, use the query_memory tool." + ), + tools=[query_memory], + model="gpt-4.1-mini", +) + + +async def chat( + user_id: str, + message: str, + session_id: str, + assistant_id: str = "assistant", +) -> str: + """Run one conversation turn with persistent Honcho memory. + + Fetches prior history first, then saves the user message, runs the agent, + and saves the assistant reply. History is read before persisting the current + turn to avoid duplicating it in the prompt. + + Args: + user_id: Unique identifier for the user. + message: The user's input message. + session_id: Identifier for the current conversation session. + assistant_id: Peer ID for the assistant. Defaults to ``"assistant"``. + + Returns: + The agent's response as a string. 
+ """ + ctx = HonchoContext(user_id=user_id, session_id=session_id, assistant_id=assistant_id) + + # Fetch prior history BEFORE saving the current turn to avoid duplicating it + try: + history = get_context(ctx, tokens=2000) + except HonchoError as exc: + logger.warning("Could not load Honcho context; continuing without history: %s", exc) + history = [] + input_messages = [*history, {"role": "user", "content": message}] + + # Persist user message before the agent runs + try: + save_memory(user_id, message, "user", session_id, assistant_id=assistant_id) + except HonchoError as exc: + logger.warning("Could not persist user message: %s", exc) + + result = await Runner.run(honcho_agent, input_messages, context=ctx) + response = str(result.final_output) + + # Persist assistant response after the run + try: + save_memory(user_id, response, "assistant", session_id, assistant_id=assistant_id) + except HonchoError as exc: + logger.warning("Could not persist assistant message: %s", exc) + + return response + + +if __name__ == "__main__": + print("HonchoMemoryAgent — type 'quit' to exit\n") + # Replace "demo-user" with a real user identifier in production. + _user_id = "demo-user" + # A fresh session ID per run prevents history from accumulating across runs. + _session_id = str(uuid.uuid4()) + + # Register peers once at session start — not on every turn. 
+ setup_session(_user_id, _session_id) + + while True: + _user_input = input("You: ").strip() + if not _user_input: + continue + if _user_input.lower() in ("quit", "exit"): + break + _response = asyncio.run(chat(_user_id, _user_input, _session_id)) + print(f"Agent: {_response}\n") diff --git a/examples/openai-agents/python/pyproject.toml b/examples/openai-agents/python/pyproject.toml new file mode 100644 index 000000000..d1519c5f3 --- /dev/null +++ b/examples/openai-agents/python/pyproject.toml @@ -0,0 +1,26 @@ +[project] +name = "honcho-openai-agents" +version = "0.1.0" +description = "Honcho persistent memory integration for the OpenAI Agents SDK" +readme = "README.md" +requires-python = ">=3.9" +dependencies = [ + "honcho-ai>=2.1.0", + "openai-agents>=0.0.3", + "python-dotenv>=1.0.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["tools"] + +[tool.pytest.ini_options] +pythonpath = ["."] diff --git a/examples/openai-agents/python/tests/test_basic.py b/examples/openai-agents/python/tests/test_basic.py new file mode 100644 index 000000000..2d83f7694 --- /dev/null +++ b/examples/openai-agents/python/tests/test_basic.py @@ -0,0 +1,95 @@ +"""Basic import and structure tests for the OpenAI Agents SDK integration. + +These tests validate package structure and tool signatures without requiring +a running Honcho server or any API keys. 
+""" + +import os +import sys + +import pytest + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + + +def test_save_memory_import(): + """Test that save_memory can be imported.""" + from tools.save_memory import save_memory + + assert callable(save_memory) + + +def test_query_memory_import(): + """Test that query_memory can be imported.""" + from tools.query_memory import query_memory + + assert callable(query_memory) + + +def test_get_context_import(): + """Test that get_context can be imported.""" + from tools.get_context import get_context + + assert callable(get_context) + + +def test_tools_package_exports(): + """Test that the tools package exports all three public symbols.""" + import tools + + assert hasattr(tools, "save_memory") + assert hasattr(tools, "query_memory") + assert hasattr(tools, "get_context") + + +def test_tools_all_list(): + """Test that __all__ contains the expected exports.""" + import tools + + assert hasattr(tools, "__all__") + for name in ("get_context", "query_memory", "save_memory"): + assert name in tools.__all__, f"{name!r} missing from __all__" + + +def test_honcho_context_dataclass(): + """Test that HonchoContext can be instantiated with required fields.""" + from tools.client import HonchoContext + + ctx = HonchoContext(user_id="alice", session_id="session-1") + assert ctx.user_id == "alice" + assert ctx.session_id == "session-1" + assert ctx.assistant_id == "assistant" + + +def test_honcho_context_custom_assistant_id(): + """Test that HonchoContext accepts a custom assistant_id.""" + from tools.client import HonchoContext + + ctx = HonchoContext(user_id="alice", session_id="s1", assistant_id="my-bot") + assert ctx.assistant_id == "my-bot" + + +def test_save_memory_raises_on_empty_content(): + """Test that save_memory raises ValueError for empty content.""" + from tools.save_memory import save_memory + + with pytest.raises(ValueError, match="content must not be empty"): + save_memory("user1", "", 
"user", "session1") + + +def test_query_memory_is_function_tool(): + """Test that query_memory is decorated as an OpenAI Agents function tool.""" + from agents import FunctionTool + + from tools.query_memory import query_memory + + assert isinstance(query_memory, FunctionTool) + + +def test_main_module_imports(): + """Test that main.py can be imported and exposes the agent and chat function.""" + import main + + assert hasattr(main, "honcho_agent") + assert hasattr(main, "chat") + assert callable(main.chat) diff --git a/examples/openai-agents/python/tests/test_integration.py b/examples/openai-agents/python/tests/test_integration.py new file mode 100644 index 000000000..9e99dc3db --- /dev/null +++ b/examples/openai-agents/python/tests/test_integration.py @@ -0,0 +1,165 @@ +"""Integration tests for the OpenAI Agents SDK + Honcho memory integration. + +These tests run against the live Honcho API and require ``HONCHO_API_KEY`` +to be set. They are skipped automatically when the key is absent. 
+""" + +import os +import sys +import time +import uuid + +import pytest + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from tools.client import HonchoContext +from tools.get_context import get_context +from tools.save_memory import save_memory + +pytestmark = pytest.mark.skipif( + not os.getenv("HONCHO_API_KEY"), + reason="HONCHO_API_KEY not set — skipping integration tests", +) + + +@pytest.fixture(autouse=True) +def rate_limit_delay(): + """Pause between tests to stay under the Honcho API rate limit.""" + yield + time.sleep(0.5) + + +def unique_id(prefix: str) -> str: + """Return a unique prefixed ID to avoid cross-test state leakage.""" + return f"{prefix}_{uuid.uuid4().hex[:8]}" + + +class TestSaveMemory: + """Tests for the save_memory helper.""" + + def test_returns_confirmation_string(self): + result = save_memory(unique_id("user"), "Hello!", "user", unique_id("session")) + assert isinstance(result, str) and len(result) > 0 + + def test_saves_user_message(self): + user_id = unique_id("user") + result = save_memory(user_id, "I enjoy hiking", "user", unique_id("session")) + assert user_id in result or "user" in result.lower() + + def test_saves_assistant_message(self): + result = save_memory( + unique_id("user"), "Great to hear!", "assistant", unique_id("session") + ) + assert isinstance(result, str) and len(result) > 0 + + def test_saves_multiple_turns_same_session(self): + user_id = unique_id("user") + session_id = unique_id("session") + r1 = save_memory(user_id, "I love jazz music", "user", session_id) + r2 = save_memory(user_id, "Jazz is wonderful!", "assistant", session_id) + assert len(r1) > 0 and len(r2) > 0 + + def test_non_assistant_role_treated_as_user(self): + result = save_memory( + unique_id("user"), "Testing role fallback", "human", unique_id("session") + ) + assert isinstance(result, str) and len(result) > 0 + + def test_custom_assistant_id(self): + result = save_memory( + unique_id("user"), + "Hello!", + 
"assistant", + unique_id("session"), + assistant_id="my-bot", + ) + assert isinstance(result, str) and len(result) > 0 + + +class TestGetContext: + """Tests for the get_context helper.""" + + def test_returns_list(self): + user_id = unique_id("user") + session_id = unique_id("session") + save_memory(user_id, "Hello there!", "user", session_id) + + ctx = HonchoContext(user_id=user_id, session_id=session_id) + result = get_context(ctx) + assert isinstance(result, list) + + def test_returns_openai_format(self): + user_id = unique_id("user") + session_id = unique_id("session") + save_memory(user_id, "My name is Alex", "user", session_id) + save_memory(user_id, "Nice to meet you, Alex!", "assistant", session_id) + + ctx = HonchoContext(user_id=user_id, session_id=session_id) + result = get_context(ctx) + + assert isinstance(result, list) + for msg in result: + assert "role" in msg and "content" in msg + assert msg["role"] in ("user", "assistant", "system") + assert isinstance(msg["content"], str) + + def test_empty_session_returns_list(self): + ctx = HonchoContext(user_id=unique_id("user"), session_id=unique_id("session")) + result = get_context(ctx) + assert isinstance(result, list) + + def test_respects_token_limit(self): + user_id = unique_id("user") + session_id = unique_id("session") + # Save enough content that a 50-token budget will truncate it + for i in range(10): + save_memory( + user_id, + f"Message {i}: " + "This is a longer sentence to consume tokens. 
" * 5, + "user", + session_id, + ) + + ctx = HonchoContext(user_id=user_id, session_id=session_id) + + # Wait until at least some messages are visible (async processing) + large: list = [] + for _ in range(10): + large = get_context(ctx, tokens=8000) + if len(large) > 0: + break + time.sleep(1) + + small = get_context(ctx, tokens=50) + + assert isinstance(small, list) and isinstance(large, list) + assert len(large) > len(small), "50-token budget should return fewer messages than 8000" + + +class TestSaveGetRoundtrip: + """End-to-end tests combining save_memory and get_context.""" + + def test_saved_messages_appear_in_context(self): + user_id = unique_id("user") + session_id = unique_id("session") + user_content = "Hello from the integration test!" + assistant_content = "Hi there, integration test!" + + save_memory(user_id, user_content, "user", session_id) + save_memory(user_id, assistant_content, "assistant", session_id) + + ctx = HonchoContext(user_id=user_id, session_id=session_id) + + # Retry briefly — Honcho processes messages asynchronously + messages = [] + for _ in range(5): + messages = get_context(ctx) + contents = [m["content"] for m in messages] + if user_content in contents and assistant_content in contents: + break + time.sleep(1) + + contents = [m["content"] for m in messages] + assert user_content in contents, "User message not found in context" + assert assistant_content in contents, "Assistant message not found in context" diff --git a/examples/openai-agents/python/tools/__init__.py b/examples/openai-agents/python/tools/__init__.py new file mode 100644 index 000000000..457662544 --- /dev/null +++ b/examples/openai-agents/python/tools/__init__.py @@ -0,0 +1,7 @@ +"""Honcho memory tools for the OpenAI Agents SDK integration.""" + +from .get_context import get_context +from .query_memory import query_memory +from .save_memory import save_memory + +__all__ = ["get_context", "query_memory", "save_memory"] diff --git 
a/examples/openai-agents/python/tools/client.py b/examples/openai-agents/python/tools/client.py new file mode 100644 index 000000000..15830ef77 --- /dev/null +++ b/examples/openai-agents/python/tools/client.py @@ -0,0 +1,58 @@ +"""Honcho client initialization and context for OpenAI Agents SDK integration.""" + +from __future__ import annotations + +import os +from dataclasses import dataclass, field + +from dotenv import load_dotenv +from honcho import Honcho + +load_dotenv() + + +@dataclass +class HonchoContext: + """Holds Honcho identity for a single conversation turn. + + Pass this as the ``context`` argument to ``Runner.run()``. Tools and the + dynamic ``instructions`` callable read from it to resolve the correct peer + and session without requiring global state. + + Attributes: + user_id: Unique identifier for the human peer. + session_id: Identifier for the current conversation session. + assistant_id: Peer ID for the assistant. Defaults to ``"assistant"``. + """ + + user_id: str + session_id: str + assistant_id: str = field(default="assistant") + + +def get_client(workspace_id: str | None = None) -> Honcho: + """Initialize and return a Honcho client. + + Reads ``HONCHO_API_KEY`` and ``HONCHO_WORKSPACE_ID`` from environment + variables. The ``workspace_id`` parameter overrides the environment + variable if provided. + + Args: + workspace_id: Optional workspace ID override. Falls back to the + ``HONCHO_WORKSPACE_ID`` env var, then to ``"default"``. + + Returns: + Configured Honcho client instance. + + Raises: + ValueError: If ``HONCHO_API_KEY`` is not set. + """ + api_key = os.getenv("HONCHO_API_KEY") + if not api_key: + raise ValueError( + "HONCHO_API_KEY is required. Set it in your environment or .env file." 
+ ) + + env_workspace = os.getenv("HONCHO_WORKSPACE_ID") + resolved_workspace = workspace_id or env_workspace or "default" + return Honcho(api_key=api_key, workspace_id=resolved_workspace) diff --git a/examples/openai-agents/python/tools/get_context.py b/examples/openai-agents/python/tools/get_context.py new file mode 100644 index 000000000..1e2307094 --- /dev/null +++ b/examples/openai-agents/python/tools/get_context.py @@ -0,0 +1,35 @@ +"""Retrieve Honcho conversation context formatted for LLM injection.""" + +from __future__ import annotations + +from .client import HonchoContext, get_client + + +def get_context( + ctx: HonchoContext, + tokens: int = 2000, +) -> list[dict[str, str]]: + """Retrieve conversation context ready for injection into an LLM prompt. + + Fetches recent messages from a Honcho session within the given token + budget and converts them to OpenAI-compatible message format. The + returned list is suitable for use as prior messages in a + ``Runner.run()`` input or as context in dynamic agent instructions. + + Args: + ctx: ``HonchoContext`` holding the user, session, and assistant IDs. + tokens: Maximum number of tokens to include. Defaults to ``2000``. + + Returns: + A list of message dicts in OpenAI format: + ``[{"role": "user" | "assistant", "content": "..."}]``. + Returns an empty list if the session has no messages yet. 
+ """ + honcho = get_client() + session = honcho.session(ctx.session_id) + context = session.context(tokens=tokens) + # Strip 'name' field — the OpenAI Responses API does not accept it + return [ + {"role": m["role"], "content": m["content"]} + for m in context.to_openai(assistant=ctx.assistant_id) + ] diff --git a/examples/openai-agents/python/tools/query_memory.py b/examples/openai-agents/python/tools/query_memory.py new file mode 100644 index 000000000..24cbdd32a --- /dev/null +++ b/examples/openai-agents/python/tools/query_memory.py @@ -0,0 +1,38 @@ +"""Query Honcho memory via the Dialectic API — exposed as an agent tool.""" + +from agents import RunContextWrapper, function_tool + +from .client import HonchoContext, get_client + + +@function_tool +def query_memory(ctx: RunContextWrapper[HonchoContext], query: str) -> str: + """Query what Honcho knows about the current user using natural language. + + Sends a question to Honcho's Dialectic API and returns an answer grounded + in the peer's long-term memory and stored observations. Use this tool when + the user asks about their own history, preferences, or past conversations. + + Args: + ctx: Run context carrying the ``HonchoContext`` with user identity. + query: Natural language question, e.g. ``"What are my hobbies?"`` or + ``"What did we discuss last time?"``. + + Returns: + A natural language answer drawn from Honcho's memory, or a fallback + message if no relevant information was found. + + Raises: + ValueError: If query is empty. + """ + cleaned_query = query.strip() + if not cleaned_query: + raise ValueError("query must not be empty") + + honcho = get_client() + peer = honcho.peer(ctx.context.user_id) + response = peer.chat(query=cleaned_query) + + if response: + return str(response) + return "No relevant information found in memory." 
diff --git a/examples/openai-agents/python/tools/save_memory.py b/examples/openai-agents/python/tools/save_memory.py new file mode 100644 index 000000000..3fb2ae1eb --- /dev/null +++ b/examples/openai-agents/python/tools/save_memory.py @@ -0,0 +1,44 @@ +"""Save a conversation message to Honcho memory.""" + +from .client import get_client + + +def save_memory( + user_id: str, + content: str, + role: str, + session_id: str, + assistant_id: str = "assistant", +) -> str: + """Save a single conversation turn to Honcho memory. + + Creates the peer and session if they do not already exist. Registers + both peers in the session on first use, then persists the message. + + Args: + user_id: Unique identifier for the user peer. + content: Text content of the message to save. + role: Either ``"user"`` or ``"assistant"``. Any value other than + ``"assistant"`` is treated as the user peer. + session_id: Identifier for the conversation session. + assistant_id: Peer ID for the assistant. Defaults to ``"assistant"``. + + Returns: + A confirmation string describing what was saved. + + Raises: + ValueError: If content is empty. + """ + cleaned_content = content.strip() + if not cleaned_content: + raise ValueError("content must not be empty") + + honcho = get_client() + user_peer = honcho.peer(user_id) + assistant_peer = honcho.peer(assistant_id) + session = honcho.session(session_id) + + sender = assistant_peer if role == "assistant" else user_peer + session.add_messages([sender.message(cleaned_content)]) + + return f"Saved {role} message to session '{session_id}' for user '{user_id}'." diff --git a/examples/openai-agents/typescript/main.ts b/examples/openai-agents/typescript/main.ts new file mode 100644 index 000000000..c16d3768f --- /dev/null +++ b/examples/openai-agents/typescript/main.ts @@ -0,0 +1,103 @@ +/** + * OpenAI Agents JS + Honcho persistent memory integration. + * + * Demonstrates a conversational agent that remembers users across sessions. 
+ * Honcho stores every message and builds a long-term representation of the user; + * the agent injects that context into its instructions on every turn and can + * query memory on demand via the `query_memory` tool. + * + * Usage: + * bun run main.ts + * + * Environment variables: + * HONCHO_API_KEY Required. Your Honcho API key from honcho.dev. + * HONCHO_WORKSPACE_ID Optional. Workspace ID (default: "default"). + * OPENAI_API_KEY Required. Your OpenAI API key. + */ + +import { Agent, run } from "@openai/agents"; +import * as readline from "readline/promises"; +import { randomUUID } from "crypto"; + +import { createContext, getClient } from "./tools/client.js"; +import type { HonchoContext } from "./tools/client.js"; +import { getContext } from "./tools/getContext.js"; +import { queryMemoryTool } from "./tools/queryMemory.js"; +import { saveMemory } from "./tools/saveMemory.js"; + +const INSTRUCTIONS = + "You are a helpful assistant with persistent memory powered by Honcho. " + + "You remember users across conversations. 
" + + "When a user asks what you remember about them, use the query_memory tool."; + +const honchoAgent = new Agent({ + name: "HonchoMemoryAgent", + instructions: INSTRUCTIONS, + tools: [queryMemoryTool], + model: "gpt-4.1-mini", +}); + +async function setupSession( + userId: string, + sessionId: string, + assistantId = "assistant" +): Promise { + const honcho = getClient(); + const userPeer = honcho.peer(userId); + const assistantPeer = honcho.peer(assistantId); + const session = honcho.session(sessionId); + await session.addPeers([userPeer, assistantPeer]); +} + +export async function chat( + userId: string, + message: string, + sessionId: string, + assistantId = "assistant" +): Promise { + const ctx = createContext(userId, sessionId, assistantId); + + // Fetch prior history BEFORE saving the current turn to avoid duplicating it + const history = await getContext(ctx, 2000); + const input = [...history, { role: "user" as const, content: message }]; + + await saveMemory(userId, message, "user", sessionId, assistantId); + + const result = await run(honchoAgent, input, { context: ctx }); + const response = result.finalOutput ?? ""; + + await saveMemory(userId, response, "assistant", sessionId, assistantId); + return response; +} + +async function main() { + console.log("OpenAI Agents HonchoMemoryAgent — type 'quit' to exit\n"); + // Replace "demo-user" with a real user identifier in production. + const userId = "demo-user"; + // A fresh session ID per run prevents history from accumulating across runs. 
+ const sessionId = randomUUID(); + + await setupSession(userId, sessionId); + + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + }); + + while (true) { + const userInput = (await rl.question("You: ")).trim(); + if (!userInput) continue; + if (["quit", "exit"].includes(userInput.toLowerCase())) { + rl.close(); + break; + } + try { + const response = await chat(userId, userInput, sessionId); + console.log(`Agent: ${response}\n`); + } catch (err) { + console.error(`Error: ${err instanceof Error ? err.message : String(err)}\n`); + } + } +} + +main().catch(console.error); diff --git a/examples/openai-agents/typescript/package.json b/examples/openai-agents/typescript/package.json new file mode 100644 index 000000000..4ba92661d --- /dev/null +++ b/examples/openai-agents/typescript/package.json @@ -0,0 +1,18 @@ +{ + "name": "honcho-openai-agents-example", + "version": "1.0.0", + "description": "Honcho persistent memory integration for OpenAI Agents JS SDK", + "type": "module", + "scripts": { + "start": "bun run main.ts" + }, + "dependencies": { + "@openai/agents": "^0.0.11", + "honcho-ai": "^0.1.0", + "zod": "^3.24.0" + }, + "devDependencies": { + "typescript": "^5.0.0", + "@types/node": "^20.0.0" + } +} diff --git a/examples/openai-agents/typescript/tools/client.ts b/examples/openai-agents/typescript/tools/client.ts new file mode 100644 index 000000000..2177adb2c --- /dev/null +++ b/examples/openai-agents/typescript/tools/client.ts @@ -0,0 +1,30 @@ +/** + * Honcho client initialization and context for OpenAI Agents JS integration. 
*/ + +import Honcho from "honcho-ai"; + +export interface HonchoContext { + userId: string; + sessionId: string; + assistantId: string; +} + +export function createContext( + userId: string, + sessionId: string, + assistantId = "assistant" +): HonchoContext { + return { userId, sessionId, assistantId }; +} + +export function getClient(): Honcho { + const apiKey = process.env.HONCHO_API_KEY; + if (!apiKey) { + throw new Error( + "HONCHO_API_KEY is required. Set it in your environment or .env file." + ); + } + const workspaceId = process.env.HONCHO_WORKSPACE_ID ?? "default"; + return new Honcho({ apiKey, workspaceId }); +} diff --git a/examples/openai-agents/typescript/tools/getContext.ts b/examples/openai-agents/typescript/tools/getContext.ts new file mode 100644 index 000000000..7c7079aea --- /dev/null +++ b/examples/openai-agents/typescript/tools/getContext.ts @@ -0,0 +1,24 @@ +/** + * Retrieve Honcho conversation context formatted for LLM injection. + */ + +import { getClient } from "./client.js"; +import type { HonchoContext } from "./client.js"; + +export interface Message { + role: "user" | "assistant"; + content: string; +} + +export async function getContext( + ctx: HonchoContext, + tokens = 2000 +): Promise<Message[]> { + const honcho = getClient(); + const session = honcho.session(ctx.sessionId); + const context = await session.context({ tokens }); + // Strip the 'name' field — the OpenAI Responses API does not accept it + return (context.toOpenai({ assistant: ctx.assistantId }) as Array<Record<string, string>>).map( + ({ role, content }) => ({ role: role as Message["role"], content }) + ); +} diff --git a/examples/openai-agents/typescript/tools/queryMemory.ts b/examples/openai-agents/typescript/tools/queryMemory.ts new file mode 100644 index 000000000..e78a0b7d1 --- /dev/null +++ b/examples/openai-agents/typescript/tools/queryMemory.ts @@ -0,0 +1,39 @@ +/** + * Query Honcho memory via the Dialectic API — exposed as an agent tool. 
*/ + +import { tool } from "@openai/agents"; +import { z } from "zod"; +import { getClient } from "./client.js"; +import type { HonchoContext } from "./client.js"; + +export const queryMemoryTool = tool({ + name: "query_memory", + description: + "Query what Honcho knows about the current user using natural language. " + + "Use this when the user asks what you remember about them.", + parameters: z.object({ + query: z.string().describe("Natural language question about the user"), + }), + execute: async ({ query }, runContext) => { + const trimmed = query.trim(); + if (!trimmed) { + throw new Error("query must not be empty"); + } + + const ctx = runContext?.context as HonchoContext | undefined; + if (!ctx?.userId) { + throw new Error("Missing Honcho context (userId)"); + } + try { + const honcho = getClient(); + const peer = honcho.peer(ctx.userId); + const response = await peer.chat(trimmed); + return response ?? "No relevant information found in memory."; + } catch (err) { + throw new Error( + `Failed to query Honcho memory: ${err instanceof Error ? err.message : String(err)}` + ); + } + }, +}); diff --git a/examples/openai-agents/typescript/tools/saveMemory.ts b/examples/openai-agents/typescript/tools/saveMemory.ts new file mode 100644 index 000000000..7435f7eeb --- /dev/null +++ b/examples/openai-agents/typescript/tools/saveMemory.ts @@ -0,0 +1,28 @@ +/** + * Save a conversation message to Honcho memory. + */ + +import { getClient } from "./client.js"; + +type Role = "user" | "assistant"; + +export async function saveMemory( + userId: string, + content: string, + role: Role, + sessionId: string, + assistantId = "assistant" +): Promise<void> { + const cleanedContent = content.trim(); + if (!cleanedContent) { + throw new Error("content must not be empty"); + } + + const honcho = getClient(); + const userPeer = honcho.peer(userId); + const assistantPeer = honcho.peer(assistantId); + const session = honcho.session(sessionId); + + const sender = role === "assistant" ? 
assistantPeer : userPeer; + await session.addMessages([sender.message(cleanedContent)]); +} diff --git a/examples/openai-agents/typescript/tsconfig.json b/examples/openai-agents/typescript/tsconfig.json new file mode 100644 index 000000000..95b5a2cd4 --- /dev/null +++ b/examples/openai-agents/typescript/tsconfig.json @@ -0,0 +1,12 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "outDir": "dist" + }, + "include": ["*.ts", "tools/*.ts"] +}