Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
82 changes: 72 additions & 10 deletions agents/agent1.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,39 +26,95 @@

# ── 2. Tools ───────────────────────────────────────────────────────────────
def get_weather(lat: float, lon: float) -> dict:
    """Fetch today's forecast for the given coordinates from Open-Meteo.

    Returns a dict of the form
    ``{"high": <max temp °C>, "low": <min temp °C>, "conditions": <description>}``.

    Transient network failures (timeouts / connection errors) are retried
    up to three attempts with a short pause between them; on the final
    failure the original exception is re-raised.  HTTP error statuses are
    not retried and propagate immediately via ``raise_for_status``.
    """
    url = (
        "https://api.open-meteo.com/v1/forecast"
        f"?latitude={lat}&longitude={lon}"
        "&daily=weathercode,temperature_2m_max,temperature_2m_min"
        "&forecast_days=1&timezone=auto"
    )

    max_retries = 3  # total attempts, including the first one
    for attempt in range(max_retries):
        try:
            response = requests.get(url, timeout=15)
            response.raise_for_status()
            daily = response.json()["daily"]
        except (requests.Timeout, requests.ConnectionError):
            if attempt == max_retries - 1:
                raise  # out of retries — surface the original error
            print(f" ⚠️ Retry {attempt + 1}/{max_retries - 1} after timeout...")
            time.sleep(2)  # brief pause before the next attempt
        else:
            # Open-Meteo returns one-element lists for forecast_days=1.
            return {
                "high": daily["temperature_2m_max"][0],
                "low": daily["temperature_2m_min"][0],
                "conditions": WEATHER_CODES.get(daily["weathercode"][0], "Unknown"),
            }

# ── 3. Tool registry ────────────────────────────────────────────────────────

# Maps the tool name the LLM emits on its "Action:" line to the Python
# callable that implements it; run() looks actions up here by exact name.
TOOLS = {
    "get_weather": get_weather,
}

# ── 4. LLM client ───────────────────────────────────────────────────────────

# Locally served Ollama model; temperature 0.0 keeps the Thought/Action/Args
# output format as deterministic as possible.
llm = ChatOllama(model="llama3.2", temperature=0.0)

# ── 5. System prompt ────────────────────────────────────────────────────────
# Instructs the model to follow a strict Thought/Action/Args protocol so that
# run() can parse tool calls with plain string splitting.  The prompt text is
# runtime behavior — do not reword it casually; the parser depends on the
# exact "Thought:" / "Action:" / "Args:" / "Final:" markers.
SYSTEM = textwrap.dedent("""

You are a weather agent with one tool:

get_weather(lat:float, lon:float)
→ {"high": float, "low": float, "conditions": str}
Returns today's weather forecast with temperatures in Celsius

You MUST follow this exact format. Do NOT add extra text or explanations.

To use the tool, output EXACTLY this format:
Thought: <your reasoning>
Action: get_weather
Args: {"lat": <latitude>, "lon": <longitude>}

Example:
Thought: I need to get weather for London at coordinates 51.5074, -0.1278
Action: get_weather
Args: {"lat": 51.5074, "lon": -0.1278}

When you have the information needed to answer, output:
Thought: <your reasoning>
Final: <complete natural language answer - NO Thought/Action/Args format here>

Example of Final:
Thought: I now have the weather data for London
Final: Today in London will be Slight rain showers with a high of 12.7°C and a low of 8.6°C.

CRITICAL RULES:
1. Follow the format EXACTLY - every response must start with "Thought:"
2. NEVER make up or hallucinate tool results
3. After outputting Action/Args, STOP and wait for Observation
4. Only proceed after you receive the actual Observation
5. After "Final:" output ONLY plain text - do NOT use Thought/Action/Args format
""").strip()

# ── 6. TAO run helper ───────────────────────────────────────────────────────
def run(question: str) -> str:

"""Execute the TAO loop, letting the AI decide which tools to call."""
messages = [
{"role": "system", "content": SYSTEM},
{"role": "user", "content": question},
]

print("\n--- Thought → Action → Observation loop ---\n")

max_iterations = 5 # Safety limit
for i in range(max_iterations):

# Get AI's next step
reply = llm.invoke(messages)
response = reply.content.strip()
print(response + "\n")

# Check if AI is done
if "Final:" in response:
Expand All @@ -70,20 +126,26 @@ def run(question: str) -> str:
if "Action:" in response and "Args:" in response:
try:
# Extract action and args

action_line = response.split("Action:")[1].split("\n")[0].strip()
args_text = response.split("Args:")[1].split("\n")[0].strip()

# Get the tool function
tool_name = action_line
tool_func = TOOLS.get(tool_name)

if tool_func is None:
print(f"⚠️ Unknown tool: '{tool_name}'\n")
print(f"Available tools: {list(TOOLS.keys())}\n")
break

# Parse arguments and call the tool

args = json.loads(args_text)
observation = tool_func(**args)
print(f"Observation: {observation}\n")

# Add to conversation history

messages.append({"role": "assistant", "content": response})
messages.append({"role": "user", "content": f"Observation: {observation}"})
except json.JSONDecodeError as e:
print(f"⚠️ Failed to parse Args as JSON: {e}\n")
print(f"Args text was: {args_text}\n")
Expand Down
99 changes: 97 additions & 2 deletions agents/mcp_agent_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,15 @@
A TRUE agentic implementation where the LLM dynamically selects which
tools to call and when to stop. This demonstrates:

* **LLM-Driven Control Flow**: Agent loop runs until LLM says "DONE"
* **Dynamic Tool Selection**: LLM chooses which MCP tool to invoke each step
* **Flexible Reasoning**: Can handle queries requiring different tool sequences
* **TAO Protocol**: Full thought/action/observation trace with real agent behavior

Example Flows:
1. Standard: geocode → get_weather → convert_c_to_f → DONE
2. With coords: get_weather → convert_c_to_f → DONE (skip geocode)
3. Celsius OK: geocode → get_weather → DONE (skip conversion)

Prerequisites: FastMCP weather server must be running on localhost:8000
"""
Expand All @@ -19,9 +28,51 @@
from fastmcp.exceptions import ToolError
from langchain_ollama import ChatOllama

# ╔══════════════════════════════════════════════════════════════════╗
# ║ 1. Enhanced system prompt for dynamic tool selection             ║
# ╚══════════════════════════════════════════════════════════════════╝
# Describes the three MCP tools plus the DONE sentinel action.  The agent
# loop parses replies with regexes keyed on the literal "Thought:" /
# "Action:" / "Args:" lines, so the wording below is load-bearing — edits
# to this string change runtime parsing behavior.
SYSTEM = textwrap.dedent("""
You are a weather information agent with access to these tools:

geocode_location(name: str)
Converts a city/location name to coordinates
Returns: {"latitude": float, "longitude": float, "name": str}

get_weather(lat: float, lon: float)
Gets current weather for coordinates
Returns: {"temperature": float, "code": int, "conditions": str}
Note: Temperature is in Celsius

convert_c_to_f(c: float)
Converts Celsius to Fahrenheit
Returns: float

IMPORTANT: When you have enough information to answer the user's question,
respond with:
Thought: I have all the information needed
Action: DONE
Args: {}

For each step where you need to call a tool, respond with EXACTLY three lines:

Thought: <your reasoning about what to do next>
Action: <exact tool name: geocode_location, get_weather, convert_c_to_f, or DONE>
Args: <valid JSON arguments for the tool>

Examples:
Thought: I need to find the coordinates for Paris first
Action: geocode_location
Args: {"name": "Paris"}

Thought: Now I'll get the weather at those coordinates
Action: get_weather
Args: {"lat": 48.8566, "lon": 2.3522}

Thought: I need to convert 20.5 Celsius to Fahrenheit
Action: convert_c_to_f
Args: {"c": 20.5}

Do NOT add extra text. Do NOT explain after your three lines.
""").strip()

# Regex patterns for parsing LLM responses
Expand Down Expand Up @@ -74,6 +125,14 @@ def extract_city(prompt: str) -> Optional[str]:
# ╔══════════════════════════════════════════════════════════════════╗
# ║ 4. Dynamic TAO loop with LLM-controlled tool selection ║
# ╚══════════════════════════════════════════════════════════════════╝
async def run_dynamic(city: str, max_steps: int = 10) -> None:
"""
Run a dynamic TAO agent loop where the LLM decides which tools to call.

Args:
city: The city to query about
max_steps: Maximum number of tool calls to prevent infinite loops
"""
llm = ChatOllama(model="llama3.2", temperature=0.0)

async with Client("http://127.0.0.1:8000/mcp/") as mcp:
Expand All @@ -82,10 +141,32 @@ def extract_city(prompt: str) -> Optional[str]:
{"role": "user", "content": f"What is the current weather in {city}?"},
]

print("\n" + "="*60)
print("Dynamic TAO Agent - LLM Controls Tool Selection")
print("="*60 + "\n")

# Store context for final answer
context = {
"city": city,
"latitude": None,
"longitude": None,
"temperature_c": None,
"temperature_f": None,
"conditions": None,
}

for step in range(1, max_steps + 1):
print(f"[Step {step}]")


# Get LLM's decision
response = llm.invoke(messages).content.strip()
print(response)

# Parse the action
action_match = ACTION_RE.search(response)
if not action_match:
print("\n❌ Error: Could not parse Action from LLM response")
return

action = action_match.group(1).lower()

Expand Down Expand Up @@ -121,7 +202,11 @@ def extract_city(prompt: str) -> Optional[str]:
print(f"\n❌ Error: Invalid JSON in Args: {e}")
return

# Dynamically call the tool the LLM selected
print(f"\n→ Calling MCP tool: {action}({json.dumps(args)})")

try:
result = unwrap(await mcp.call_tool(action, args))
except ToolError as e:
print(f"❌ MCP Error: {e}\n")
# Add error to conversation and let LLM try to recover
Expand All @@ -148,7 +233,17 @@ def extract_city(prompt: str) -> Optional[str]:
context["temperature_c"] = result.get("temperature")
context["conditions"] = result.get("conditions")
elif action == "convert_c_to_f":
context["temperature_f"] = float(result)
context["temperature_f"] = float(result)

# Show observation
observation = f"Observation: {json.dumps(result) if isinstance(result, dict) else result}"
print(observation)
print()

# Add to conversation history
messages.append({"role": "assistant", "content": response})
messages.append({"role": "user", "content": observation})

# Max steps reached
print(f"\n⚠️ Reached maximum steps ({max_steps}) without completion")
print("Partial information gathered:")
Expand Down
46 changes: 40 additions & 6 deletions agents/mcp_server_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@
# ─── Weather Tool ────────────────────────────────────────────────────

@mcp.tool

def get_weather(lat: float, lon: float) -> dict:
"""
Fetch **current weather** from Open-Meteo and return a concise dict.

Expand All @@ -92,8 +92,18 @@

Returns
-------


dict
{
"temperature": <float °C>,
"code": <int WMO weathercode>,
"conditions": <friendly description>,
"error": <error message if request failed>
}
"""
url = (
"https://api.open-meteo.com/v1/forecast"
f"?latitude={lat}&longitude={lon}&current_weather=true"
)

last_error = None

Expand All @@ -114,7 +124,14 @@

resp.raise_for_status()


# Extract and return weather data
cw = resp.json()["current_weather"]
code = cw["weathercode"]
return {
"temperature": cw["temperature"],
"code": code,
"conditions": WEATHER_CODES.get(code, "Unknown"),
}

except requests.HTTPError as e:
# HTTP errors (4xx, 5xx not already caught)
Expand Down Expand Up @@ -145,7 +162,9 @@
# ─── Temperature Conversion Tool ─────────────────────────────────────

@mcp.tool

def convert_c_to_f(c: float) -> float:
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    fahrenheit = (c * 9 / 5) + 32
    return fahrenheit


# ─── Geocoding Tool ──────────────────────────────────────────────────
Expand All @@ -169,7 +188,16 @@ def geocode_location(name: str) -> dict:

Returns
-------

dict
{
"latitude": <float>,
"longitude": <float>,
"name": <matched location name>,
"error": <error message if request failed>
}
"""
url = "https://geocoding-api.open-meteo.com/v1/search"
last_error = None

# Retry loop with fresh connections
for attempt in range(MAX_RETRIES):
Expand Down Expand Up @@ -234,3 +262,9 @@ def geocode_location(name: str) -> dict:
if __name__ == "__main__":
    # Start HTTP server using FastAPI + Uvicorn
    # Clients connect to: http://127.0.0.1:8000/mcp/
    mcp.run(
        transport="http",   # serve over streamable HTTP rather than stdio
        host="127.0.0.1",   # bind to loopback only — not exposed externally
        port=8000,          # must match the URL hard-coded in the agent clients
        path="/mcp/",
    )
Loading