A powerful Go framework for building intelligent agents with LLM integration, tool execution, and multi-agent collaboration.
- **Simple Agent Creation** - Create AI agents with just a few lines of code
- **Extensible Tool System** - Build custom tools with automatic validation and execution
- **Multi-Agent Teams** - Orchestrate teams of specialized agents working together
- **Streaming Responses** - Real-time streaming of agent responses and tool execution
- **Conversation Persistence** - Built-in support for conversation history storage
- **Progressive Discovery** - Agents can discover tool and sub-agent capabilities at runtime
- **Multiple LLM Providers** - Support for OpenAI, DeepSeek, TogetherAI, and any OpenAI-compatible API
```bash
go get github.com/thinktwice/agentForge
```

```go
package main
import (
"context"
"fmt"
"github.com/thinktwice/agentForge/src/agents"
"github.com/thinktwice/agentForge/src/llms"
)
func main() {
ctx := context.Background()
// Create an LLM engine
llm, err := llms.GetTogetherAILLM(ctx, llms.Llama3170BInstructTurbo)
if err != nil {
panic(err)
}
// Create an agent
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "Assistant",
Description: "A helpful AI assistant",
SystemPrompt: "You are a helpful and intelligent AI assistant.",
MainAgent: true,
})
// Chat with the agent
responseCh := agent.ChatStream("Hello! How can you help me?")
// Process streaming response
for chunk := range responseCh.Start() {
if chunk.Content != "" {
fmt.Print(chunk.Content)
}
}
}
```

Create an agent using the `AgentConfig` struct:

```go
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm, // Required: LLM engine
AgentName: "my-agent", // Required: Agent name
Description: "Basic description", // Optional: Short description
SystemPrompt: "You are...", // Optional: Custom system prompt
Tools: []llms.Tool{}, // Optional: Available tools
MaxToolIterations: 10, // Optional: Max tool execution loops (default: 10)
MainAgent: true, // Optional: Is this the main agent?
Persistence: "json", // Optional: Enable conversation history
})
```

All agent responses are streamed in real-time:

```go
responseCh := agent.ChatStream("What is the capital of France?")
for chunk := range responseCh.Start() {
switch chunk.Type {
case llms.TypeContent:
// Content being streamed
fmt.Print(chunk.Content)
case llms.TypeCompletion:
// Final completion with token usage
fmt.Printf("\nTokens: %d total\n", chunk.TotalTokens)
case llms.TypeToolExecuting:
// Tool is being executed
fmt.Printf("Executing: %s\n", chunk.ToolExecuting.Name)
case llms.TypeToolResult:
// Tool execution completed
for _, result := range chunk.ToolResults {
fmt.Printf("Result: %s\n", result.Result)
}
}
if chunk.Status == llms.StatusError {
fmt.Printf("Error: %s\n", chunk.Content)
}
}
```
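If you only need the final text rather than incremental chunks, you can accumulate content as it streams. A minimal sketch using only the chunk fields shown above (`strings` is from the standard library):

```go
var sb strings.Builder
for chunk := range agent.ChatStream("What is the capital of France?").Start() {
	if chunk.Type == llms.TypeContent {
		sb.WriteString(chunk.Content)
	}
}
fullReply := sb.String() // complete response once the stream closes
```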
TogetherAI:

```go
llm, err := llms.GetTogetherAILLM(ctx, llms.Llama3170BInstructTurbo)
// Requires: TOGETHERAI_API_KEY environment variable
```

DeepSeek:

```go
llm, err := llms.GetDeepSeekLLM(ctx, "deepseek-chat")
// Requires: DEEPSEEK_API_KEY environment variable
```

Any OpenAI-compatible API:

```go
import "github.com/thinktwice/agentForge/src/llms"

// For any OpenAI-compatible API, use the internal constructor
// (Note: This requires accessing the internal newOpenAILLM function)
```
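If you want to pick whichever provider is configured at runtime, a small sketch using only the factory functions above (the `newLLM` helper and the fallback order are illustrative, not part of the framework):

```go
import (
	"context"
	"os"

	"github.com/thinktwice/agentForge/src/llms"
)

// newLLM prefers TogetherAI when its key is set, otherwise falls back to DeepSeek.
func newLLM(ctx context.Context) (llms.LLMEngine, error) {
	if os.Getenv("TOGETHERAI_API_KEY") != "" {
		return llms.GetTogetherAILLM(ctx, llms.Llama3170BInstructTurbo)
	}
	return llms.GetDeepSeekLLM(ctx, "deepseek-chat")
}
```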
Tools extend agent capabilities through a universal tool system: every tool is created with `core.NewTool` and receives both the agent context and the call arguments:

```go
import "github.com/thinktwice/agentForge/src/core"

calculatorTool := core.NewTool(
"calculate", // Tool name
"Performs mathematical calculations", // Basic description
	`Advanced Details:
- Supports: +, -, *, /
- Returns numeric result`, // Advanced description
	`Troubleshooting:
- Division by zero returns error
- Use proper syntax: "2 + 2"`, // Troubleshooting info
[]core.Parameter{ // Parameters
{
Name: "expression",
Type: "string",
Description: "Mathematical expression to evaluate",
Required: true,
},
},
func(agentContext map[string]any, args map[string]any) llms.ToolReturn {
// Tools that don't need context can ignore it
expression := args["expression"].(string)
// Perform calculation
result := calculate(expression)
return core.NewSuccessResponse(result)
},
)
```

Tools can access agent context for advanced functionality:

```go
loggerTool := core.NewTool(
"log_message",
"Logs a message with agent context",
"Logs messages with agent name and trace information",
"Check file permissions if logging fails",
[]core.Parameter{
{Name: "message", Type: "string", Required: true},
{Name: "level", Type: "string", Required: false},
},
func(agentContext map[string]any, args map[string]any) llms.ToolReturn {
// Access agent context
agentName := agentContext["agentName"].(string)
message := args["message"].(string)
// Log with context
log.Printf("[%s] %s", agentName, message)
return core.NewSuccessResponse("Logged successfully")
},
)
```

Supported parameter types with automatic validation:

```go
[]core.Parameter{
{Name: "text", Type: "string", Required: true},
{Name: "count", Type: "number", Required: true},
{Name: "enabled", Type: "boolean", Required: false},
{Name: "config", Type: "object", Required: false},
{Name: "items", Type: "array", Required: false},
}
```
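Inside a handler, these values arrive in the `args` map as ordinary Go values. A sketch of typical reads, assuming standard JSON decoding (so `number` arguments arrive as `float64`):

```go
func(agentContext map[string]any, args map[string]any) llms.ToolReturn {
	text := args["text"].(string)
	count := int(args["count"].(float64)) // JSON numbers typically decode as float64
	enabled, _ := args["enabled"].(bool)  // optional parameter: false when omitted
	return core.NewSuccessResponse(fmt.Sprintf("%s x%d (enabled=%v)", text, count, enabled))
}
```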
Add custom validators for complex validation logic:

```go
{
Name: "email",
Type: "string",
Description: "User email address",
Required: true,
Validator: func(value any) error {
email := value.(string)
if !strings.Contains(email, "@") {
return fmt.Errorf("invalid email format")
}
return nil
},
}
```
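Validators receive the already-decoded value, so it is worth type-checking before casting. A hedged sketch for a numeric range check (again assuming JSON numbers decode to `float64`; the parameter itself is illustrative):

```go
{
	Name:        "count",
	Type:        "number",
	Description: "How many items to process",
	Required:    true,
	Validator: func(value any) error {
		n, ok := value.(float64) // JSON numbers decode to float64
		if !ok || n <= 0 {
			return fmt.Errorf("count must be a positive number")
		}
		return nil
	},
}
```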
Tool handlers return one of three response types:

```go
// Success response
return core.NewSuccessResponse("Operation completed successfully")
// Error response (no data)
return core.NewErrorResponse("Invalid parameter: count must be positive")
// Failure response (with partial data)
return core.NewFailureResponse("Timeout occurred", "Partial result: 42")
```

Attach tools to an agent at creation time, or manage them afterwards:

```go
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "tool-user",
Tools: []llms.Tool{
calculatorTool,
weatherTool,
databaseTool,
},
})
// Or add/modify tools after creation
agent.SetTools([]llms.Tool{newTool1, newTool2})
existingTools := agent.GetTools()
```
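For unit tests or debugging, a tool built with `core.NewTool` can also be invoked directly. A minimal sketch using the `Tool` and `ToolReturn` interfaces listed under the core interfaces below (the context key value `"test-harness"` is illustrative):

```go
ret := calculatorTool.Call(
	map[string]any{"agentName": "test-harness"}, // illustrative agent context
	map[string]any{"expression": "2 + 2"},
)
if ret.Success() {
	fmt.Println("result:", ret.Data())
} else {
	fmt.Println("error:", ret.Error())
}
```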
Multi-agent systems allow specialization and delegation:

```go
// Create specialized sub-agents
reasoningAgent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "reasoning-agent",
Description: "Breaks down complex problems into logical steps",
SystemPrompt: `You are a reasoning agent that excels at analytical thinking.
Break down complex problems systematically.`,
})
dataAgent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "data-agent",
Description: "Analyzes and processes data",
SystemPrompt: "You are a data analysis expert.",
Tools: []llms.Tool{
databaseTool,
analyticsTool,
},
})
// Create main agent that coordinates the team
mainAgent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "coordinator",
Description: "Main coordinator agent",
SystemPrompt: "You coordinate a team of specialized agents.",
MainAgent: true,
})
// Add sub-agents using the delegate tool
// Sub-agents are automatically available through the "delegate" tool
```

Enable automatic reasoning capabilities:

```go
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "smart-agent",
Reasoning: true, // Automatically adds a reasoning sub-agent
MainAgent: true,
})
```

When agents have sub-agents, they automatically get a delegate tool:

```go
// The main agent can delegate to sub-agents
// This happens automatically based on the system prompt
```

The delegation is transparent:

```go
mainAgent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "main",
Reasoning: true,
MainAgent: true,
})
// User asks a complex question
responseCh := mainAgent.ChatStream("Analyze the implications of quantum computing on cryptography")
// The agent may automatically delegate to the reasoning agent
for chunk := range responseCh.Start() {
// Chunks include AgentName and Trace to identify the source
fmt.Printf("[%s] %s", chunk.AgentName, chunk.Content)
}
```
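Because chunks carry `AgentName`, you can render sub-agent output differently from the coordinator's. A small variation on the loop above (the comparison against `"main"` follows the agent name in this example; the filtering logic itself is illustrative):

```go
for chunk := range responseCh.Start() {
	if chunk.AgentName != "main" {
		// Output produced by a delegated sub-agent
		fmt.Printf("[sub-agent %s] %s", chunk.AgentName, chunk.Content)
		continue
	}
	fmt.Print(chunk.Content)
}
```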
Use different LLM engines for different sub-agents:

```go
fastLLM, _ := llms.GetTogetherAILLM(ctx, llms.Llama323BInstructTurbo)
powerfulLLM, _ := llms.GetTogetherAILLM(ctx, llms.Llama3170BInstructTurbo)
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: powerfulLLM,
AgentName: "main",
Reasoning: true,
MainAgent: true,
ExtraEngines: map[string]llms.LLMEngine{
"system-reasoning": fastLLM, // Use faster model for reasoning
},
})
```

Agents can discover information about other agents at runtime using the `expand` tool:

```go
import "github.com/thinktwice/agentForge/src/tools"
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "explorer",
Tools: []llms.Tool{
tools.NewExpandTool(), // Enables progressive discovery
},
})
// The agent can now query detailed information about tools and sub-agents
// Example: expand(subject_type="agent", subject_name="reasoning-agent", troubleshoot=true)
```

The expand tool provides three levels of information:

- `BasicDescription`: Short one-line description (always visible in the system prompt)
- `AdvanceDescription`: Detailed capabilities and usage patterns
- `Troubleshooting`: Common issues and debugging tips
Store and retrieve conversation history:

```go
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "persistent-agent",
Persistence: "json", // Stores history as JSON files
})
// History is automatically saved and loaded
// Each agent gets its own history file based on AgentName
```
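Since history is loaded automatically when the agent is created, a later run of the same program can refer back to earlier turns. A hedged usage sketch (assumes the same `AgentName` and working directory across runs):

```go
// On a subsequent run, the agent reloads its saved JSON history first,
// so follow-up questions can reference the earlier conversation.
for chunk := range agent.ChatStream("What did we discuss last time?").Start() {
	fmt.Print(chunk.Content)
}
```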
Pass custom context to all tools:

```go
agent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "contextual-agent",
ToolExecutionContext: map[string]any{
"database": dbConnection,
"user_id": userID,
"config": appConfig,
},
})
// Tools can access this context
customTool := core.NewTool(
"query_db",
"Queries the database",
"...",
"...",
[]core.Parameter{{Name: "query", Type: "string", Required: true}},
	func(agentContext map[string]any, args map[string]any) llms.ToolReturn {
		db := agentContext["database"].(*sql.DB)
		userID := agentContext["user_id"].(string)
		// Use the injected context in tool execution (runQuery is your own helper)
		result := runQuery(db, userID, args["query"].(string))
		return core.NewSuccessResponse(result)
},
)
```
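Since `agentContext` is a plain `map[string]any`, a missing or mistyped key will panic on a bare type assertion. A defensive variant of the lookup above, using the comma-ok form and the documented `core.NewErrorResponse`:

```go
db, ok := agentContext["database"].(*sql.DB)
if !ok {
	return core.NewErrorResponse("database connection missing from tool context")
}
```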
All tools created with `core.NewTool` automatically implement the `Discoverable` interface:

```go
// Tools automatically provide:
// - BasicDescription() - returns the description parameter
// - AdvanceDescription() - returns the advanceDescription parameter
// - Troubleshooting() - returns the troubleshooting parameter
// For custom tool implementations, implement the interface:
type MyCustomTool struct {
// ... your fields
}
func (t *MyCustomTool) BasicDescription() string {
return "Short one-line description"
}
func (t *MyCustomTool) AdvanceDescription() string {
return "Detailed information about capabilities"
}
func (t *MyCustomTool) Troubleshooting() string {
return "Common issues and solutions"
}
```
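To read the discovery strings of a tool from your own code, you can assert against the `Discoverable` method set, since `core.NewTool` hands the tool back behind the `llms.Tool` interface. A sketch using the calculator tool from earlier (the anonymous-interface assertion is an assumption about the concrete type, hence the comma-ok guard):

```go
if d, ok := any(calculatorTool).(interface {
	BasicDescription() string
	AdvanceDescription() string
	Troubleshooting() string
}); ok {
	fmt.Println(d.BasicDescription())
	fmt.Println(d.AdvanceDescription())
	fmt.Println(d.Troubleshooting())
}
```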
Putting it all together:

```go
package main

import (
"context"
"fmt"
"github.com/thinktwice/agentForge/src/agents"
"github.com/thinktwice/agentForge/src/llms"
"github.com/thinktwice/agentForge/src/tools"
)
func main() {
ctx := context.Background()
// Initialize LLM
llm, err := llms.GetTogetherAILLM(ctx, llms.Llama3170BInstructTurbo)
if err != nil {
panic(err)
}
// Create a calculator tool
calcTool := core.NewTool(
"calculate",
"Performs mathematical calculations",
"Supports +, -, *, / operations",
"Use proper syntax: '2 + 2'",
[]core.Parameter{
{Name: "expression", Type: "string", Required: true},
},
func(agentContext map[string]any, args map[string]any) llms.ToolReturn {
expr := args["expression"].(string)
result := evaluate(expr)
return core.NewSuccessResponse(fmt.Sprintf("Result: %s", result))
},
)
// Create main agent with reasoning and tools
mainAgent := agents.NewAgent(agents.AgentConfig{
LLMEngine: llm,
AgentName: "MathAssistant",
Description: "An intelligent math assistant",
SystemPrompt: `You are a helpful math assistant.
You can solve mathematical problems using your tools and reasoning capabilities.`,
Tools: []llms.Tool{calcTool, tools.NewExpandTool()},
Reasoning: true, // Enable reasoning sub-agent
MainAgent: true,
Persistence: "json",
})
// Chat with the agent
responseCh := mainAgent.ChatStream("What is 15 multiplied by 23?")
fmt.Println("=== Agent Response ===")
for chunk := range responseCh.Start() {
if chunk.Content != "" {
fmt.Print(chunk.Content)
}
if chunk.Type == llms.TypeToolExecuting && chunk.ToolExecuting != nil {
fmt.Printf("\n[Executing: %s]\n", chunk.ToolExecuting.Name)
}
if chunk.Type == llms.TypeToolResult && len(chunk.ToolResults) > 0 {
fmt.Printf("\n[Tool Result: %s]\n", chunk.ToolResults[0].Result)
}
}
}
func evaluate(expr string) string {
// Your calculation logic here
return "345"
}
```

The framework uses the following environment variables:
- `TOGETHERAI_API_KEY` - API key for TogetherAI
- `DEEPSEEK_API_KEY` - API key for DeepSeek
- `OPENAI_API_KEY` - API key for OpenAI (if using OpenAI)
These can be set via:
- A `.env` file in your project directory
- System environment variables
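A typical `.env` file (placeholder values):

```bash
TOGETHERAI_API_KEY=your-togetherai-key
DEEPSEEK_API_KEY=your-deepseek-key
OPENAI_API_KEY=your-openai-key
```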
```
github.com/thinktwice/agentForge/
├── src/
│   ├── agents/              # Agent implementation
│   │   ├── agent.go         # Main agent struct and methods
│   │   └── agentConfig.go
│   ├── llms/                # LLM engine implementations
│   │   ├── factory.go       # LLM factory functions
│   │   └── openai.go        # OpenAI-compatible client
│   ├── tools/               # Tool system
│   │   ├── baseTool.go      # Tool base implementation
│   │   ├── expandTool.go
│   │   └── delegateTool.go
│   ├── persistence/         # Conversation persistence
│   └── interfaces.go        # Core interfaces
└── examples/                # Example implementations
```
Key types:

- `agents.Agent` - Main agent type
- `agents.AgentConfig` - Configuration for creating agents
- `llms.LLMEngine` - Interface for LLM providers
- `llms.Tool` - Interface for tools
- `llms.ChunkResponse` - Streaming response chunk
Core interfaces:

```go
type Tool interface {
GetName() string
Call(agentContext map[string]any, args map[string]any) ToolReturn
GetFunctionDefinition() FunctionDefinition
}
type ToolReturn interface {
Success() bool
Error() string
Data() string
}
type Discoverable interface {
BasicDescription() string
AdvanceDescription() string
Troubleshooting() string
}
```

Contributions are welcome! Please feel free to submit a Pull Request.
[Add your license here]
For questions, issues, or feature requests, please open an issue on GitHub.
