
Commit 7212483

Merge pull request #130 from moyu026/master
add mcp
2 parents 58424f1 + 13b7da5 commit 7212483

File tree

2 files changed: +260 −0 lines changed

Lines changed: 109 additions & 0 deletions
@@ -0,0 +1,109 @@
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import asyncio
import json
import httpx
from typing import Any
from mcp.server.fastmcp import FastMCP

# Proxy configuration for outbound requests
proxies = {
    "http://": httpx.Proxy("http://proxy-notebook.modelarts.com:8083"),
    "https://": httpx.Proxy("http://proxy-notebook.modelarts.com:8083"),
}

# Initialize the MCP server
mcp = FastMCP("WeatherServer")

# OpenWeather API configuration
OPENWEATHER_API_BASE = "https://api.openweathermap.org/data/2.5/weather"
API_KEY = " "  # Replace with your own OpenWeather API Key
USER_AGENT = "weather-app/1.0"


async def fetch_weather(city: str) -> dict[str, Any] | None:
    """
    Fetch weather information from the OpenWeather API.
    :param city: city name (in English, e.g. Beijing)
    :return: weather data dict; on failure, a dict containing an "error" message
    """
    params = {
        "q": city,
        "appid": API_KEY,
        "units": "metric",
        "lang": "zh_cn"
    }
    headers = {"User-Agent": USER_AGENT}
    print(OPENWEATHER_API_BASE, params, headers)
    async with httpx.AsyncClient(proxies=proxies, timeout=30.0) as client:
        try:
            response = await client.get(OPENWEATHER_API_BASE, params=params, headers=headers)
            response.raise_for_status()
            return response.json()  # returns a dict
        except httpx.HTTPStatusError as e:
            return {"error": f"HTTP error: {e.response.status_code}"}
        except Exception as e:
            return {"error": f"Request failed: {str(e)}"}


def format_weather(data: dict[str, Any] | str) -> str:
    """
    Format weather data as human-readable text.
    :param data: weather data (a dict or a JSON string)
    :return: formatted weather information string
    """
    # If a string was passed in, parse it into a dict first
    if isinstance(data, str):
        try:
            data = json.loads(data)
        except Exception as e:
            return f"Could not parse weather data: {e}"

    # If the data contains an error message, return it directly
    if "error" in data:
        return f"⚠️ {data['error']}"

    # Extract fields defensively
    city = data.get("name", "Unknown")
    country = data.get("sys", {}).get("country", "Unknown")
    temp = data.get("main", {}).get("temp", "N/A")
    humidity = data.get("main", {}).get("humidity", "N/A")
    wind_speed = data.get("wind", {}).get("speed", "N/A")
    # "weather" may be missing or an empty list, so fall back to a default dict before indexing [0]
    weather_list = data.get("weather") or [{}]
    description = weather_list[0].get("description", "Unknown")

    return (
        f"🌍 {city}, {country}\n"
        f"🌡 Temperature: {temp}°C\n"
        f"💧 Humidity: {humidity}%\n"
        f"🌬 Wind speed: {wind_speed} m/s\n"
        f"🌤 Conditions: {description}\n"
    )


@mcp.tool()
async def query_weather(city: str) -> str:
    """
    Query today's weather for the given city (English name) and return a formatted result.
    :param city: city name (in English)
    :return: formatted weather information
    """
    data = await fetch_weather(city)
    print(data)
    return format_weather(data)


async def manual_get_weather():
    await query_weather("beijing")


if __name__ == "__main__":
    # Run the MCP server over standard I/O
    mcp.run(transport='stdio')
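
For a quick local check of the helpers above without going through an MCP client, a minimal smoke test along these lines could be used (hypothetical snippet, not part of this commit; it assumes the server file is saved as stdio_mcp.py, as the client below expects, and that API_KEY holds a valid OpenWeather key):

# smoke_test.py - hypothetical local check of the weather helpers (not part of this commit).
# Assumes the server above is importable as stdio_mcp and API_KEY contains a valid key.
import asyncio

from stdio_mcp import fetch_weather, format_weather

async def smoke_test():
    data = await fetch_weather("Beijing")   # raw OpenWeather payload or an {"error": ...} dict
    print(format_weather(data))             # human-readable summary

if __name__ == "__main__":
    asyncio.run(smoke_test())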
Lines changed: 151 additions & 0 deletions
@@ -0,0 +1,151 @@
import asyncio
import json
from openai import OpenAI
from mcp.client.stdio import stdio_client
from mcp import ClientSession, StdioServerParameters

# Configuration for vLLM's OpenAI-compatible API
OPENAI_API_KEY = "EMPTY"  # vLLM does not require a real key
OPENAI_API_BASE = "http://localhost:8000/v1"  # vLLM's OpenAI-compatible endpoint


class MCPClientDemo:
    def __init__(self, server_path: str):
        self.server_path = server_path
        # Use vLLM through the OpenAI-compatible client
        self.llm = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)

    async def run(self, user_query: str):
        server_params = StdioServerParameters(command="python", args=[self.server_path])
        async with stdio_client(server=server_params) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()

                # Fetch the tools registered on the server
                tools = (await session.list_tools()).tools

                # Convert them into OpenAI-style tool definitions
                openai_tools = []
                for tool in tools:
                    openai_tools.append({
                        "type": "function",
                        "function": {
                            "name": tool.name,
                            "description": tool.description or "",
                            "parameters": tool.inputSchema or {
                                "type": "object",
                                "properties": {
                                    "city_name": {"type": "string", "description": "city name"}
                                },
                                "required": ["city_name"]
                            }
                        }
                    })

                # -------------------------------
                # Method 1: vLLM + MCP tool calling
                # -------------------------------
                # Step 1: let the model decide whether a tool call is needed
                system_message = f"""
You are an intelligent assistant. You can use the following tools:
{json.dumps(openai_tools, ensure_ascii=False, indent=2)}

If the user's question requires a tool call, reply with "TOOL_CALL: <tool name> <JSON arguments>".
If no tool call is needed, answer directly.
"""

                messages_for_tool_decision = [
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": user_query}
                ]

                # Ask vLLM (via the OpenAI client) for the decision
                try:
                    decision_response = self.llm.chat.completions.create(
                        model="/home/ma-user/work/Qwen2.5-1.5B-Instruct/",  # must match the model name vLLM was started with
                        messages=messages_for_tool_decision,
                        max_tokens=512,
                    )
                    decision_text = decision_response.choices[0].message.content.strip()
                except Exception as e:
                    decision_text = ""
                    print(f"vLLM call failed: {e}")

                result_with_tool = {"model_reply": "", "tool_called": None, "tool_result": None}

                # Check whether a tool call was requested
                if decision_text.startswith("TOOL_CALL:"):
                    try:
                        # Parse the tool-call instruction
                        _, tool_name, args_json_str = decision_text.split(" ", 2)
                        arguments = json.loads(args_json_str)

                        # Invoke the actual tool through the MCP session
                        tool_result = await session.call_tool(tool_name, arguments)

                        # Step 2: feed the tool result back to the model to generate the final reply
                        messages_with_result = messages_for_tool_decision + [
                            {"role": "assistant", "content": decision_text},
                            {"role": "tool", "content": json.dumps(tool_result.model_dump(), ensure_ascii=False),
                             "name": tool_name},
                            {"role": "user", "content": "Please answer the user's question based on the tool result above."}
                        ]

                        final_response = self.llm.chat.completions.create(
                            model="/home/ma-user/work/Qwen2.5-1.5B-Instruct/",
                            messages=messages_with_result,
                            max_tokens=512,
                        )
                        result_with_tool["model_reply"] = final_response.choices[0].message.content
                        result_with_tool["tool_called"] = tool_name
                        result_with_tool["tool_arguments"] = arguments
                        result_with_tool["tool_result"] = tool_result
                    except Exception as e:
                        result_with_tool["model_reply"] = f"Failed to parse tool call: {e}. Raw reply: {decision_text}"
                else:
                    result_with_tool["model_reply"] = decision_text

                # -------------------------------
                # Method 2: model-only reply (no tools)
                # -------------------------------
                try:
                    response_no_tool = self.llm.chat.completions.create(
                        model="/home/ma-user/work/Qwen2.5-1.5B-Instruct/",
                        messages=[{"role": "user", "content": user_query}],
                        max_tokens=512,
                    )
                    message_no_tool = response_no_tool.choices[0].message
                    result_no_tool = {
                        "model_reply": message_no_tool.content
                    }
                except Exception as e:
                    result_no_tool = {
                        "model_reply": f"vLLM call failed (no tools): {e}"
                    }

                return {
                    "user_query": user_query,
                    "with_mcp_tool": result_with_tool,
                    "without_tool": result_no_tool
                }


async def main():
    client = MCPClientDemo(server_path="/home/ma-user/work/mcp/stdio_mcp.py")
    result = await client.run("How is the weather in nanjing?")

    print(">>> User query:", result["user_query"])
    print("\n[With MCP tool]")
    print("Model reply:", result["with_mcp_tool"]["model_reply"])
    if result["with_mcp_tool"]["tool_called"]:
        print("Tool called:", result["with_mcp_tool"]["tool_called"])
        print("Tool arguments:", result["with_mcp_tool"]["tool_arguments"])
        print("Tool result:", result["with_mcp_tool"]["tool_result"])
    else:
        print("No tool was called")

    print("\n[Without tools]")
    print("Model reply:", result["without_tool"]["model_reply"])


if __name__ == "__main__":
    asyncio.run(main())
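
For reference, the plain-text convention defined in the system prompt above expects tool-call replies of the form "TOOL_CALL: <tool name> <JSON arguments>"; the following hypothetical illustration (not part of this commit) shows how the parsing branch consumes such a reply for the server's query_weather tool:

# Hypothetical illustration of the TOOL_CALL convention and how the parsing branch splits it.
import json

example_reply = 'TOOL_CALL: query_weather {"city": "nanjing"}'
_, tool_name, args_json_str = example_reply.split(" ", 2)  # -> "query_weather", '{"city": "nanjing"}'
arguments = json.loads(args_json_str)                      # -> {"city": "nanjing"}
# session.call_tool(tool_name, arguments) would then invoke query_weather on the MCP server.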

0 commit comments
