A lightweight Python agent framework based on OpenAI-compatible API.
- Chat: Streaming, retry, connection pooling
- Tools: Decorator-based registration with auto schema generation
- Skills: Progressive loading with YAML frontmatter
- Multimodal: Image/audio/video with auto base64 encoding
- MCP: Model Context Protocol (stdio/sse/http)
- Memory: Short-term (sliding window) + Long-term (mem0/vector/agentic)
- Pipeline: Sequential, fanout, aggregation workflows
- Embedder: OpenAI-compatible embedding with caching
import asyncio
from agent import Chater, ChaterConfig, ClientConfig, ChatConfig
config = ChaterConfig(
client_config=ClientConfig(api_key="sk-xxx", base_url="https://api.openai.com/v1"),
chat_config=ChatConfig(model="gpt-4o")
)
async def main():
async with Chater(config) as chater:
response = await chater.chat_fn(
messages=[{"role": "user", "content": "Hello!"}],
stream=True,
on_chunk=lambda c: print(c.content or "", end="", flush=True),
)
print(f"\nTokens: {response.usage.total_tokens}")
asyncio.run(main())

from agent import ToolKit
toolkit = ToolKit()
@toolkit.register
def get_weather(city: str) -> str:
"""Get weather for a city.
Args:
city: The city name.
"""
return f"Weather in {city}: Sunny"
tools = toolkit.to_openai()

from agent import ReactAgent
# Uses default REACT_AGENT_PROMPT, or customize with .system()
agent = (
ReactAgent(chater=chater, toolkit=toolkit, max_iters=10)
.on_chunk(lambda c: print(c.content or "", end="", flush=True))
.on_tool_call(lambda tc: print(f"[Tool: {tc.name}]"))
.on_tool_result(lambda tr: print(f"[Result: {tr.output}]"))
)
response = await agent("What's the weather in Beijing?")

PlanAgent extends ReactAgent with planning capability for complex tasks:
from agent import PlanAgent
# Uses default PLAN_AGENT_PROMPT with planning instructions
agent = (
PlanAgent(chater=chater, toolkit=toolkit, max_iters=25, max_subtasks=10)
.on_plan_change(lambda p: print(f"Plan: {p.name}" if p else "Plan done"))
)
response = await agent("Build a simple REST API with Flask")
if agent.plan:
print(agent.plan.to_markdown())

agent = ReactAgent(
chater=chater,
system="You are a coding assistant.",
skills_path="./skills",
)

Skill format (SKILL.md):
---
name: code-helper
description: Python coding best practices
---
## Instructions
Follow PEP 8...

from agent import BinaryContent
content = [
BinaryContent.text("Describe this image"),
BinaryContent.image("./photo.jpg"),
]
response = await chater.chat_fn(
messages=[{"role": "user", "content": [c.to_openai() for c in content]}]
)

from agent import Mem0Memory, VectorMemory, AgenticMemory
# mem0-based long-term memory
memory = Mem0Memory(user_id="user_123")
await memory.put("User prefers concise answers")
results = await memory.get("user preferences", k=5)
# Vector store memory
from agent import NumpyStore, Embedder
memory = VectorMemory(store=NumpyStore(), embedder=embedder)
# Agentic memory (A-MEM with evolution)
memory = AgenticMemory(
chat_fn=chater.chat_fn, # for content analysis
embed_fn=embedder.embed_one, # optional
evolve=True, # enable memory evolution
)

from agent import ChaterPool
async with ChaterPool([config1, config2], policy="round_robin") as pool:
response = await pool.chat_fn(messages=[...])

Policies: round_robin, random, least_used.
from agent import ToolKit, MCPConfig
toolkit = ToolKit()
await toolkit.connect_mcp(MCPConfig(
name="local",
transport="stdio",
command="python",
args=["mcp_server.py"],
))

License: MIT