Commit c5274c2 (parent 202af49)

simple mcp client with sampling capability

8 files changed: +409 −0 lines
examples/clients/simple-sampling-client/.env.example (1 addition)

API_KEY=YOUR_API_KEY

examples/clients/simple-sampling-client/.gitignore (1 addition)

.env

examples/clients/simple-sampling-client/.python-version (1 addition)

3.10

examples/clients/simple-sampling-client/README.md (48 additions)

# Simple Sampling Client Example (MCP)

This example demonstrates how to use the sampling capability of the MCP SDK with an OpenAI-compatible client. It shows how to:

- Connect to an MCP server
- Fetch available tools
- Use OpenAI's API for chat completions
- Call MCP tools from the client

## Prerequisites

- Python 3.10+
- [uv](https://github.com/astral-sh/uv) for dependency management
- An OpenAI API key (set in a `.env` file or as an environment variable)

## Setup

1. Install dependencies:

   ```sh
   cd examples/clients/simple-sampling-client/
   uv sync
   ```

2. Set environment variables in a `.env` file. A sample `.env` file is provided as `.env.example`.
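
   All `Configurations` fields can be overridden the same way. A sketch of a fuller `.env` (only `API_KEY` needs a real value; the other keys mirror the `Configurations` fields in `main.py`, which pydantic-settings matches case-insensitively, and default to the values shown):

   ```sh
   API_KEY=YOUR_API_KEY
   BASE_URL=https://api.openai.com/v1
   CHAT_MODEL=gpt-4o-mini
   MAX_TOKENS=1024
   MCP_SERVER_HOST=localhost
   MCP_SERVER_PORT=8000
   SYSTEM_PROMPT="You are a helpful assistant."
   ```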

3. Start the MCP server in a separate terminal:

   ```sh
   cd examples/snippets/servers/
   uv run server sampling streamable-http
   ```

4. Back in the first terminal, run the sampling client:

   ```sh
   uv run mcp-simple-sampling-client
   ```

## Usage

You will be prompted to enter a message. Type your message and press Enter. The assistant will respond using the sampling capability and may call MCP tools as needed.

Type `exit` or `quit` to stop the client.

## Code Overview

For more details, see the source code in `mcp_simple_sampling_client/main.py`.
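
The key piece is the `sampling_callback` hook: when the server sends a `sampling/createMessage` request during a tool call, the client answers it with its own LLM call via `sampling_handler`. The registration, as it appears in `SamplingClient.execute_tool_call`:

```python
async with streamablehttp_client(self.server_url) as (read_stream, write_stream, _):
    async with mcp.ClientSession(
        read_stream, write_stream, sampling_callback=self.sampling_handler
    ) as session:
        await session.initialize()
        tool_result = await session.call_tool(tool_name, arguments=arguments)
```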

examples/clients/simple-sampling-client/mcp_simple_sampling_client/__init__.py (1 addition)

"""Simple sampling client for MCP."""

examples/clients/simple-sampling-client/mcp_simple_sampling_client/main.py (286 additions)

"""Define a simple MCP client that supports sampling."""

import asyncio
import http
import json
import typing

import httpx
import pydantic_settings

import mcp
from mcp.client.streamable_http import streamablehttp_client
from mcp.shared.context import RequestContext
from mcp.types import CreateMessageRequestParams, CreateMessageResult, ErrorData, TextContent, Tool


class Configurations(pydantic_settings.BaseSettings):
    """Define configurations for the sampling client."""

    base_url: str = "https://api.openai.com/v1"
    api_key: str = "your_api_key"
    chat_model: str = "gpt-4o-mini"
    max_tokens: int = 1024
    mcp_server_host: str = "localhost"
    mcp_server_port: int = 8000
    system_prompt: str = "You are a helpful assistant."

    model_config = pydantic_settings.SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")


class SamplingClient:
    """Define a simple MCP client that supports sampling.

    Parameters
    ----------
    config : Configurations
        The configurations for the sampling client.
    """

    def __init__(self: "SamplingClient", config: Configurations) -> None:
        self.config = config

        self.server_url = f"http://{self.config.mcp_server_host}:{self.config.mcp_server_port}/mcp"
        self.api_url = f"{self.config.base_url}/chat/completions"

        self.conversation_history: list[dict[str, str]] = []

    def get_llm_response(
        self: "SamplingClient",
        chat_history: list[dict[str, str]],
        system_prompt: str,
        max_tokens: int,
        tools: list[dict[str, typing.Any]] | None = None,
    ) -> dict[str, typing.Any]:
        """Get a non-streaming response from an OpenAI-compatible LLM API.

        Parameters
        ----------
        chat_history : list[dict[str, str]]
            The chat history to use for the chat completion.
        system_prompt : str
            The system prompt to use for the chat completion.
        max_tokens : int
            The maximum number of tokens to generate in the response.
        tools : list[dict[str, typing.Any]] | None, optional
            The tools to use for the chat completion, by default None.

        Returns
        -------
        dict[str, typing.Any]
            The response from the LLM API.
        """
        updated_chat_history = [
            {"content": system_prompt, "role": "system"},
            *chat_history,
        ]

        extra_arguments = {} if tools is None else {"tool_choice": "auto", "tools": tools}

        chat_completion = httpx.post(
            self.api_url,
            json={
                "messages": updated_chat_history,
                "model": self.config.chat_model,
                "max_completion_tokens": max_tokens,
                "n": 1,
                "stream": False,
                **extra_arguments,
            },
            headers={
                "Authorization": f"Bearer {self.config.api_key}",
                "Content-Type": "application/json",
            },
            timeout=300,
        )
        chat_completion.raise_for_status()

        return chat_completion.json()
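
    # NOTE: httpx.post above is synchronous, so the async methods below block the
    # event loop while an LLM request is in flight; httpx.AsyncClient would be the
    # non-blocking alternative. The methods below also open a fresh streamable HTTP
    # session per call, trading repeated initialize handshakes for simplicity.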

    async def fetch_mcp_tools(self: "SamplingClient") -> list[Tool]:
        """List available tools."""
        async with streamablehttp_client(self.server_url) as (read_stream, write_stream, _):
            async with mcp.ClientSession(read_stream, write_stream) as session:
                await session.initialize()

                server_tools = await session.list_tools()

        return server_tools.tools

    @staticmethod
    def convert_to_openai_tools(mcp_tools: list[Tool]) -> list[dict[str, typing.Any]]:
        """Convert MCP tools to OpenAI tool call parameters.

        Parameters
        ----------
        mcp_tools : list[Tool]
            List of MCP tools to convert.

        Returns
        -------
        list[dict[str, typing.Any]]
            List of OpenAI tool call parameters.
        """
        return [
            {
                "function": {"name": tool.name, "description": tool.description or "", "parameters": tool.inputSchema},
                "type": "function",
            }
            for tool in mcp_tools
        ]

    async def sampling_handler(
        self: "SamplingClient", context: RequestContext[typing.Any, typing.Any], parameters: CreateMessageRequestParams
    ) -> CreateMessageResult | ErrorData:
        """Handle sampling requests for OpenAI API calls with MCP tools.

        Parameters
        ----------
        context : RequestContext[typing.Any, typing.Any]
            request context containing information about the sampling request
        parameters : CreateMessageRequestParams
            parameters for the sampling request, including messages and customisations

        Returns
        -------
        CreateMessageResult | ErrorData
            result of the sampling request, either a message result or an error data
        """
        del context
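
        # The sampling request is answered with a plain completion: no tools are
        # passed to get_llm_response, and OpenAI's finish_reason is forwarded as
        # the MCP stopReason.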

        openai_response = self.get_llm_response(
            [
                {
                    "content": message.content.text
                    if isinstance(message.content, TextContent)
                    else str(message.content),
                    "role": "user",
                }
                for message in parameters.messages
            ],
            parameters.systemPrompt or self.config.system_prompt,
            parameters.maxTokens,
        )

        if not (choices := openai_response["choices"]):
            return ErrorData(
                code=http.HTTPStatus.INTERNAL_SERVER_ERROR,
                message="No choices returned from OpenAI API.",
            )

        choice = choices[0]
        sampling_response_message = choice["message"]["content"] or ""

        return CreateMessageResult(
            role="assistant",
            content=TextContent(type="text", text=sampling_response_message),
            model=self.config.chat_model,
            stopReason=choice["finish_reason"],
        )

    async def execute_tool_call(self: "SamplingClient", tool_name: str, arguments: dict[str, typing.Any]) -> str:
        """Execute a tool call on an MCP server.

        Parameters
        ----------
        tool_name : str
            name of the tool to call, formatted as "mcp-{server_name}-{tool_name}"
        arguments : dict[str, typing.Any]
            arguments to pass to the tool call

        Returns
        -------
        str
            JSON string containing the result of the tool call or an error message
        """
        async with streamablehttp_client(self.server_url) as (read_stream, write_stream, _):
            async with mcp.ClientSession(read_stream, write_stream, sampling_callback=self.sampling_handler) as session:
                await session.initialize()

                tool_result = await session.call_tool(tool_name, arguments=arguments)

        if tool_result.isError:
            error_message = "".join(content.text for content in tool_result.content if isinstance(content, TextContent))

            return json.dumps({"error": f"Failed tool call to {tool_name=} with {arguments=}: {error_message}."})

        if (structured_result := tool_result.structuredContent) is not None:
            return json.dumps(structured_result)

        return json.dumps([element.model_dump() for element in tool_result.content])

    async def orchestrate(self: "SamplingClient", user_message: str) -> None:
        """Orchestrate the sampling client to handle requests."""
        self.conversation_history.append({"role": "user", "content": user_message})

        self.mcp_server_tools = await self.fetch_mcp_tools()
        self.openai_compatible_tools = self.convert_to_openai_tools(self.mcp_server_tools)

        openai_response = self.get_llm_response(
            self.conversation_history,
            self.config.system_prompt,
            self.config.max_tokens,
            tools=self.openai_compatible_tools,
        )

        if not (choices := openai_response["choices"]):
            error_message = "No choices returned from OpenAI API."
            self.conversation_history.append({"role": "assistant", "content": error_message})

            print(error_message)

            return

        choice = choices[0]
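
        # Tool-calling loop: each tool result is fed back to the model as a plain
        # assistant message (rather than an OpenAI role="tool" message), which keeps
        # the history a simple list[dict[str, str]].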
235+
236+
while choice["finish_reason"] == "tool_calls":
237+
for tool_call in choice["message"]["tool_calls"] or []:
238+
if tool_call["type"] != "function":
239+
continue
240+
241+
tool_response = await self.execute_tool_call(
242+
tool_call["function"]["name"], json.loads(tool_call["function"]["arguments"])
243+
)
244+
245+
self.conversation_history.append(
246+
{"role": "assistant", "content": f"Tool {tool_call['id']} returned: {tool_response}"}
247+
)
248+
249+
openai_response = self.get_llm_response(
250+
self.conversation_history,
251+
self.config.system_prompt,
252+
self.config.max_tokens,
253+
tools=self.openai_compatible_tools,
254+
)
255+
256+
if not (choices := openai_response["choices"]):
257+
error_message = "No choices returned from OpenAI API."
258+
self.conversation_history.append({"role": "assistant", "content": error_message})
259+
260+
print(error_message)
261+
262+
return
263+
264+
choice = choices[0]
265+
266+
assistant_message = choice["message"]["content"] or ""
267+
self.conversation_history.append({"role": "assistant", "content": assistant_message})
268+
269+
print(f"Assistant: {assistant_message}")
270+
271+
272+
def main():
273+
"""Run the sampling client."""
274+
config = Configurations()
275+
276+
sampling_client = SamplingClient(config)
277+
278+
user_message = input("User: ")
279+
while user_message.lower() not in {"exit", "quit"}:
280+
asyncio.run(sampling_client.orchestrate(user_message))
281+
282+
user_message = input("User: ")
283+
284+
285+
if __name__ == "__main__":
286+
main()

examples/clients/simple-sampling-client/pyproject.toml (51 additions)

[build-system]
requires = [
    "uv-build>=0.8.22,<0.9",
]
build-backend = "uv_build"

[project]
name = "mcp-simple-sampling-client"
version = "0.1.0"
description = "Exploring different features of MCP servers and clients"
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
license = "MIT"
authors = [
    { name = "Anirban Ray", email = "39331844+yarnabrina@users.noreply.github.com" }
]
keywords = [
    "mcp",
    "mcp-client",
    "sampling",
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Framework :: Pydantic",
    "Framework :: Pydantic :: 2",
    "Intended Audience :: Developers",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Topic :: Software Development",
    "Topic :: Utilities",
    "Typing :: Typed",
]
dependencies = [
    "httpx>=0.27.1",
    "mcp>=1.16.0,<2",
    "pydantic>=2.11.0,<3.0.0",
    "pydantic-settings>=2.5.2",
]

[project.scripts]
mcp-simple-sampling-client = "mcp_simple_sampling_client.main:main"

[tool.uv.build-backend]
module-name = "mcp_simple_sampling_client"
module-root = ""

0 commit comments

Comments
 (0)