From 181dc2ee28fd29f3679770c0aadc91375195ece2 Mon Sep 17 00:00:00 2001
From: Anirban Ray <39331844+yarnabrina@users.noreply.github.com>
Date: Sun, 2 Nov 2025 19:47:06 +0530
Subject: [PATCH] simple mcp client with sampling capability

---
 .../simple-sampling-client/.env.example       |   1 +
 .../clients/simple-sampling-client/.gitignore |   1 +
 .../simple-sampling-client/.python-version    |   1 +
 .../clients/simple-sampling-client/README.md  |  48 +++
 .../mcp_simple_sampling_client/__init__.py    |   1 +
 .../mcp_simple_sampling_client/main.py        | 286 ++++++++++++++++++
 .../simple-sampling-client/pyproject.toml     |  51 ++++
 uv.lock                                       |  20 ++
 8 files changed, 409 insertions(+)
 create mode 100644 examples/clients/simple-sampling-client/.env.example
 create mode 100644 examples/clients/simple-sampling-client/.gitignore
 create mode 100644 examples/clients/simple-sampling-client/.python-version
 create mode 100644 examples/clients/simple-sampling-client/README.md
 create mode 100644 examples/clients/simple-sampling-client/mcp_simple_sampling_client/__init__.py
 create mode 100644 examples/clients/simple-sampling-client/mcp_simple_sampling_client/main.py
 create mode 100644 examples/clients/simple-sampling-client/pyproject.toml

diff --git a/examples/clients/simple-sampling-client/.env.example b/examples/clients/simple-sampling-client/.env.example
new file mode 100644
index 0000000000..9ecb08db07
--- /dev/null
+++ b/examples/clients/simple-sampling-client/.env.example
@@ -0,0 +1 @@
+API_KEY=YOUR_API_KEY
diff --git a/examples/clients/simple-sampling-client/.gitignore b/examples/clients/simple-sampling-client/.gitignore
new file mode 100644
index 0000000000..4c49bd78f1
--- /dev/null
+++ b/examples/clients/simple-sampling-client/.gitignore
@@ -0,0 +1 @@
+.env
diff --git a/examples/clients/simple-sampling-client/.python-version b/examples/clients/simple-sampling-client/.python-version
new file mode 100644
index 0000000000..c8cfe39591
--- /dev/null
+++ b/examples/clients/simple-sampling-client/.python-version
@@ -0,0 +1 @@
+3.10
diff --git a/examples/clients/simple-sampling-client/README.md b/examples/clients/simple-sampling-client/README.md
new file mode 100644
index 0000000000..cd5c29f786
--- /dev/null
+++ b/examples/clients/simple-sampling-client/README.md
@@ -0,0 +1,48 @@
+# Simple Sampling Client Example (MCP)
+
+This example demonstrates how an MCP client can offer the sampling capability: when a server-side tool issues a sampling request, the client fulfils it with an OpenAI-compatible chat completions API. It shows how to:
+
+- Connect to an MCP server over streamable HTTP
+- Fetch the tools a server exposes
+- Use an OpenAI-compatible API for chat completions
+- Call MCP tools from the client
+
+## Prerequisites
+
+- Python 3.10+
+- [uv](https://github.com/astral-sh/uv) for dependency management
+- An OpenAI API key (set in a `.env` file or as an environment variable)
+
+## Setup
+
+1. Install dependencies:
+
+   ```sh
+   cd examples/clients/simple-sampling-client/
+   uv sync
+   ```
+
+2. Set environment variables in a `.env` file. A sample is provided as `.env.example`.
+
+3. Start the MCP server in a separate terminal:
+
+   ```sh
+   cd examples/snippets/servers/
+   uv run server sampling streamable-http
+   ```
+
+4. Back in the first terminal, run the sampling client:
+
+   ```sh
+   uv run mcp-simple-sampling-client
+   ```
+
+## Usage
+
+You will be prompted to enter a message. Type your message and press Enter. The assistant responds using the configured chat model and may call MCP tools as needed; any sampling request made by a tool is answered by this client.
+
+Type `exit` or `quit` to stop the client.
+
+## Code Overview
+
+The client registers a sampling callback on its `ClientSession`, so that server-initiated sampling requests are answered with a chat completion, as sketched below. For full details, see `mcp_simple_sampling_client/main.py`.
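+
+A minimal, self-contained sketch of that wiring; the echo handler and the `"echo"` model name are placeholders for the OpenAI-compatible call that `main.py` makes, and the URL assumes the default host and port above:
+
+```python
+import asyncio
+
+import mcp
+from mcp.client.streamable_http import streamablehttp_client
+from mcp.shared.context import RequestContext
+from mcp.types import CreateMessageRequestParams, CreateMessageResult, TextContent
+
+
+async def sampling_handler(
+    context: RequestContext, parameters: CreateMessageRequestParams
+) -> CreateMessageResult:
+    # main.py forwards the request to an OpenAI-compatible API; this
+    # sketch just echoes the last message back so it runs without a key.
+    last = parameters.messages[-1].content
+    text = last.text if isinstance(last, TextContent) else str(last)
+    return CreateMessageResult(
+        role="assistant",
+        content=TextContent(type="text", text=text),
+        model="echo",
+    )
+
+
+async def run(server_url: str) -> None:
+    async with streamablehttp_client(server_url) as (read_stream, write_stream, _):
+        # Passing sampling_callback advertises the sampling capability, so the
+        # server may issue sampling requests while a tool call is in flight.
+        async with mcp.ClientSession(read_stream, write_stream, sampling_callback=sampling_handler) as session:
+            await session.initialize()
+            tools = await session.list_tools()
+            print([tool.name for tool in tools.tools])
+
+
+asyncio.run(run("http://localhost:8000/mcp"))
+```
+
+`main.py` builds on this pattern by adding conversation history, tool-call orchestration, and configuration via pydantic-settings.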
diff --git a/examples/clients/simple-sampling-client/mcp_simple_sampling_client/__init__.py b/examples/clients/simple-sampling-client/mcp_simple_sampling_client/__init__.py
new file mode 100644
index 0000000000..7fed129acc
--- /dev/null
+++ b/examples/clients/simple-sampling-client/mcp_simple_sampling_client/__init__.py
@@ -0,0 +1 @@
+"""Simple sampling client for MCP."""
diff --git a/examples/clients/simple-sampling-client/mcp_simple_sampling_client/main.py b/examples/clients/simple-sampling-client/mcp_simple_sampling_client/main.py
new file mode 100644
index 0000000000..4669695fbc
--- /dev/null
+++ b/examples/clients/simple-sampling-client/mcp_simple_sampling_client/main.py
@@ -0,0 +1,286 @@
+"""Define a simple MCP client that supports sampling."""
+
+import asyncio
+import json
+import typing
+
+import httpx
+import pydantic_settings
+
+import mcp
+from mcp.client.streamable_http import streamablehttp_client
+from mcp.shared.context import RequestContext
+from mcp.types import CreateMessageRequestParams, CreateMessageResult, ErrorData, INTERNAL_ERROR, TextContent, Tool
+
+
+class Configurations(pydantic_settings.BaseSettings):
+    """Define configurations for the sampling client."""
+
+    base_url: str = "https://api.openai.com/v1"
+    api_key: str = "your_api_key"
+    chat_model: str = "gpt-4o-mini"
+    max_tokens: int = 1024
+    mcp_server_host: str = "localhost"
+    mcp_server_port: int = 8000
+    system_prompt: str = "You are a helpful assistant."
+
+    # Values are read from the environment, with a `.env` file as fallback.
+    model_config = pydantic_settings.SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")
+
+
+class SamplingClient:
+    """Define a simple MCP client that supports sampling.
+
+    Parameters
+    ----------
+    config : Configurations
+        The configurations for the sampling client.
+    """
+
+    def __init__(self: "SamplingClient", config: Configurations) -> None:
+        self.config = config
+
+        self.server_url = f"http://{self.config.mcp_server_host}:{self.config.mcp_server_port}/mcp"
+        self.api_url = f"{self.config.base_url}/chat/completions"
+
+        self.conversation_history: list[dict[str, str]] = []
+
+    def get_llm_response(
+        self: "SamplingClient",
+        chat_history: list[dict[str, str]],
+        system_prompt: str,
+        max_tokens: int,
+        tools: list[dict[str, typing.Any]] | None = None,
+    ) -> dict[str, typing.Any]:
+        """Get a non-streaming response from an OpenAI-compatible LLM API.
+
+        Parameters
+        ----------
+        chat_history : list[dict[str, str]]
+            The chat history to use for the chat completion.
+        system_prompt : str
+            The system prompt to use for the chat completion.
+        max_tokens : int
+            The maximum number of tokens to generate in the response.
+        tools : list[dict[str, typing.Any]] | None, optional
+            The tools to use for the chat completion, by default None.
+
+        Returns
+        -------
+        dict[str, typing.Any]
+            The response from the LLM API.
+ """ + updated_chat_history = [ + {"content": system_prompt, "role": "system"}, + *chat_history, + ] + + extra_arguments = {} if tools is None else {"tool_choice": "auto", "tools": tools} + + chat_completion = httpx.post( + self.api_url, + json={ + "messages": updated_chat_history, + "model": self.config.chat_model, + "max_completion_tokens": max_tokens, + "n": 1, + "stream": False, + **extra_arguments, + }, + headers={ + "Authorization": f"Bearer {self.config.api_key}", + "Content-Type": "application/json", + }, + timeout=300, + ) + chat_completion.raise_for_status() + + return chat_completion.json() + + async def fetch_mcp_tools(self: "SamplingClient") -> list[Tool]: + """List available tools.""" + async with streamablehttp_client(self.server_url) as (read_stream, write_stream, _): + async with mcp.ClientSession(read_stream, write_stream) as session: + await session.initialize() + + server_tools = await session.list_tools() + + return server_tools.tools + + @staticmethod + def convert_to_openai_tools(mcp_tools: list[Tool]) -> list[dict[str, typing.Any]]: + """Convert MCP tools to OpenAI tool call parameters. + + Parameters + ---------- + mcp_tools : list[Tool] + List of MCP tools to convert. + + Returns + ------- + list[dict[str, typing.Any]] + List of OpenAI tool call parameters. + """ + return [ + { + "function": {"name": tool.name, "description": tool.description or "", "parameters": tool.inputSchema}, + "type": "function", + } + for tool in mcp_tools + ] + + async def sampling_handler( + self: "SamplingClient", context: RequestContext[typing.Any, typing.Any], parameters: CreateMessageRequestParams + ) -> CreateMessageResult | ErrorData: + """Handle sampling requests for OpenAI API calls with MCP tools. + + Parameters + ---------- + context : RequestContext[typing.Any, typing.Any] + request context containing information about the sampling request + parameters : CreateMessageRequestParams + parameters for the sampling request, including messages and customisations + + Returns + ------- + CreateMessageResult | ErrorData + result of the sampling request, either a message result or an error data + """ + del context + + openai_response = self.get_llm_response( + [ + { + "content": message.content.text + if isinstance(message.content, TextContent) + else str(message.content), + "role": "user", + } + for message in parameters.messages + ], + parameters.systemPrompt or self.config.system_prompt, + parameters.maxTokens, + ) + + if not (choices := openai_response["choices"]): + return ErrorData( + code=http.HTTPStatus.INTERNAL_SERVER_ERROR, + message="No choices returned from OpenAI API.", + ) + + choice = choices[0] + sampling_response_message = choice["message"]["content"] or "" + + return CreateMessageResult( + role="assistant", + content=TextContent(type="text", text=sampling_response_message), + model=self.config.chat_model, + stopReason=choice["finish_reason"], + ) + + async def execute_tool_call(self: "SamplingClient", tool_name: str, arguments: dict[str, typing.Any]) -> str: + """Execute a tool call on an MCP server. 
+
+        Parameters
+        ----------
+        tool_name : str
+            name of the MCP tool to call
+        arguments : dict[str, typing.Any]
+            arguments to pass to the tool call
+
+        Returns
+        -------
+        str
+            JSON string containing the result of the tool call or an error message
+        """
+        async with streamablehttp_client(self.server_url) as (read_stream, write_stream, _):
+            # Register the sampling callback, so the server can delegate LLM calls
+            # back to this client while the tool runs.
+            async with mcp.ClientSession(read_stream, write_stream, sampling_callback=self.sampling_handler) as session:
+                await session.initialize()
+
+                tool_result = await session.call_tool(tool_name, arguments=arguments)
+
+        if tool_result.isError:
+            error_message = "".join(content.text for content in tool_result.content if isinstance(content, TextContent))
+
+            return json.dumps({"error": f"Failed tool call to {tool_name=} with {arguments=}: {error_message}."})
+
+        if (structured_result := tool_result.structuredContent) is not None:
+            return json.dumps(structured_result)
+
+        return json.dumps([element.model_dump() for element in tool_result.content])
+
+    async def orchestrate(self: "SamplingClient", user_message: str) -> None:
+        """Orchestrate the sampling client to handle requests."""
+        self.conversation_history.append({"role": "user", "content": user_message})
+
+        self.mcp_server_tools = await self.fetch_mcp_tools()
+        self.openai_compatible_tools = self.convert_to_openai_tools(self.mcp_server_tools)
+
+        openai_response = self.get_llm_response(
+            self.conversation_history,
+            self.config.system_prompt,
+            self.config.max_tokens,
+            tools=self.openai_compatible_tools,
+        )
+
+        if not (choices := openai_response["choices"]):
+            error_message = "No choices returned from OpenAI API."
+            self.conversation_history.append({"role": "assistant", "content": error_message})
+
+            print(error_message)
+
+            return
+
+        choice = choices[0]
+
+        # Keep resolving tool calls until the model produces a final answer.
+        while choice["finish_reason"] == "tool_calls":
+            for tool_call in choice["message"]["tool_calls"] or []:
+                if tool_call["type"] != "function":
+                    continue
+
+                tool_response = await self.execute_tool_call(
+                    tool_call["function"]["name"], json.loads(tool_call["function"]["arguments"])
+                )
+
+                # Record the tool result as plain assistant text rather than OpenAI's
+                # dedicated tool-message format, to keep this example minimal.
+                self.conversation_history.append(
+                    {"role": "assistant", "content": f"Tool {tool_call['id']} returned: {tool_response}"}
+                )
+
+            openai_response = self.get_llm_response(
+                self.conversation_history,
+                self.config.system_prompt,
+                self.config.max_tokens,
+                tools=self.openai_compatible_tools,
+            )
+
+            if not (choices := openai_response["choices"]):
+                error_message = "No choices returned from OpenAI API."
+                self.conversation_history.append({"role": "assistant", "content": error_message})
+
+                print(error_message)
+
+                return
+
+            choice = choices[0]
+
+        assistant_message = choice["message"]["content"] or ""
+        self.conversation_history.append({"role": "assistant", "content": assistant_message})
+
+        print(f"Assistant: {assistant_message}")
+
+
+def main() -> None:
+    """Run the sampling client."""
+    config = Configurations()
+
+    sampling_client = SamplingClient(config)
+
+    user_message = input("User: ")
+    while user_message.lower() not in {"exit", "quit"}:
+        asyncio.run(sampling_client.orchestrate(user_message))
+
+        user_message = input("User: ")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/clients/simple-sampling-client/pyproject.toml b/examples/clients/simple-sampling-client/pyproject.toml
new file mode 100644
index 0000000000..03eac94bfd
--- /dev/null
+++ b/examples/clients/simple-sampling-client/pyproject.toml
@@ -0,0 +1,51 @@
+[build-system]
+requires = [
+    "uv-build>=0.8.22,<0.9",
+]
+build-backend = "uv_build"
+
+[project]
+name = "mcp-simple-sampling-client"
+version = "0.1.0"
+description = "A simple MCP client demonstrating the sampling capability"
+readme = { file = "README.md", content-type = "text/markdown" }
+requires-python = ">=3.10"
+license = "MIT"
+authors = [
+    { name = "Anirban Ray", email = "39331844+yarnabrina@users.noreply.github.com" }
+]
+keywords = [
+    "mcp",
+    "mcp-client",
+    "sampling",
+]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Framework :: Pydantic",
+    "Framework :: Pydantic :: 2",
+    "Intended Audience :: Developers",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3 :: Only",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
+    "Topic :: Software Development",
+    "Topic :: Utilities",
+    "Typing :: Typed",
+]
+dependencies = [
+    "httpx>=0.27.1",
+    "mcp>=1.16.0,<2",
+    "pydantic>=2.11.0,<3.0.0",
+    "pydantic-settings>=2.5.2",
+]
+
+[project.scripts]
+mcp-simple-sampling-client = "mcp_simple_sampling_client.main:main"
+
+[tool.uv.build-backend]
+module-name = "mcp_simple_sampling_client"
+module-root = ""
diff --git a/uv.lock b/uv.lock
index 7c087ce73e..61cabc201c 100644
--- a/uv.lock
+++ b/uv.lock
@@ -11,6 +11,7 @@ members = [
     "mcp-simple-pagination",
     "mcp-simple-prompt",
     "mcp-simple-resource",
+    "mcp-simple-sampling-client",
     "mcp-simple-streamablehttp",
     "mcp-simple-streamablehttp-stateless",
     "mcp-simple-tool",
@@ -944,6 +945,25 @@ dev = [
     { name = "ruff", specifier = ">=0.6.9" },
 ]
 
+[[package]]
+name = "mcp-simple-sampling-client"
+version = "0.1.0"
+source = { editable = "examples/clients/simple-sampling-client" }
+dependencies = [
+    { name = "httpx" },
+    { name = "mcp" },
+    { name = "pydantic" },
+    { name = "pydantic-settings" },
+]
+
+[package.metadata]
+requires-dist = [
+    { name = "httpx", specifier = ">=0.27.1" },
+    { name = "mcp", editable = "." },
+    { name = "pydantic", specifier = ">=2.11.0,<3.0.0" },
+    { name = "pydantic-settings", specifier = ">=2.5.2" },
+]
+
 [[package]]
 name = "mcp-simple-streamablehttp"
 version = "0.1.0"