18 changes: 16 additions & 2 deletions README.md
@@ -5,14 +5,15 @@ This repository contains a protocol-level CLI designed to interact with a Model
- Protocol-level communication with the MCP Server.
- Dynamic tool and resource exploration.
- Support for multiple providers and models:
- Providers: OpenAI, Ollama.
- Default models: `gpt-4o-mini` for OpenAI, `qwen2.5-coder` for Ollama.
- Providers: OpenAI, Ollama, Amazon Bedrock.
- Default models: `gpt-4o-mini` for OpenAI, `qwen2.5-coder` for Ollama, `claude-3.5-sonnet` for Amazon Bedrock.

## Prerequisites
- Python 3.8 or higher.
- Required dependencies (see [Installation](#installation))
- If using Ollama, you should have Ollama installed and running.
- If using OpenAI, you should have an API key set in your environment variables (`OPENAI_API_KEY=yourkey`).
- If using Amazon Bedrock, you should have an AWS access key and secret access key configured (see the example below).
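
For Bedrock, credentials can be supplied through the standard AWS environment variables, for example:

```bash
export AWS_ACCESS_KEY_ID=yourkey
export AWS_SECRET_ACCESS_KEY=yoursecret
```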

## Installation
1. Clone the repository:
@@ -43,11 +44,17 @@ uv run mcp-cli --server sqlite

### Command-line Arguments
- `--server`: Specifies the server configuration to use. Required.

- `--config-file`: (Optional) Path to the JSON configuration file. Defaults to `server_config.json`.

- `--provider`: (Optional) Specifies the provider to use (`openai`, `ollama`, or `amazon`). Defaults to `openai`.

- `--model`: (Optional) Specifies the model to use. Defaults depend on the provider:
- `gpt-4o-mini` for OpenAI.
- `qwen2.5-coder` for Ollama.
- `claude-3.5-sonnet` for Amazon Bedrock (`claude-3.5-haiku`, `nova-lite`, and `nova-pro` are also available).

- `--aws-region`: (Optional) Specifies the AWS region to use with the `amazon` provider. Defaults to `us-east-1`.

### Examples
Run the client with the default OpenAI provider and model:
@@ -62,7 +69,14 @@ Run the client with a specific configuration and Ollama provider:
uv run mcp-cli --server sqlite --provider ollama --model llama3.2
```

Run the client with the Amazon Bedrock provider:

```bash
uv run mcp-cli --server sqlite --provider amazon --aws-region us-west-2
```

## Interactive Mode

The client supports interactive mode, allowing you to execute commands dynamically. Type `help` for a list of available commands or `quit` to exit the program.

## Supported Commands
5 changes: 5 additions & 0 deletions server_config.json
@@ -1,8 +1,13 @@
{
"mcpServers": {
"sqlite": {
"transport": "stdio",
"command": "uvx",
"args": ["mcp-server-sqlite", "--db-path", "test.db"]
},
"fetch": {
"transport": "sse",
"endpoint": "http://localhost:3001/sse"
}
}
}
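
The `SSEServerParameters` type that the new `sse` entries map to is imported in the files below, but its definition is not part of this diff. A minimal sketch of what `src/mcpcli/transport/sse/sse_server_parameters.py` presumably contains, assuming a plain dataclass mirroring `StdioServerParameters`:

```python
# Hypothetical sketch; the real definition is not shown in this diff.
from dataclasses import dataclass


@dataclass
class SSEServerParameters:
    # URL of the server's SSE endpoint, e.g. "http://localhost:3001/sse"
    endpoint: str
```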
46 changes: 34 additions & 12 deletions src/mcpcli/__main__.py
@@ -24,7 +24,10 @@
from mcpcli.messages.send_initialize_message import send_initialize
from mcpcli.messages.send_call_tool import send_call_tool
from mcpcli.messages.send_tools_list import send_tools_list
from mcpcli.transport.sse.sse_client import sse_client
from mcpcli.transport.sse.sse_server_parameters import SSEServerParameters
from mcpcli.transport.stdio.stdio_client import stdio_client
from mcpcli.transport.stdio.stdio_server_parameters import StdioServerParameters

# Default path for the configuration file
DEFAULT_CONFIG_FILE = "server_config.json"
@@ -286,19 +289,29 @@ async def run(config_path: str, server_names: List[str], command: str = None) ->
# Load server configurations and establish connections for all servers
server_streams = []
context_managers = []
for server_name in server_names:
server_params = await load_config(config_path, server_name)

# Establish stdio communication for each server
cm = stdio_client(server_params)
(read_stream, write_stream) = await cm.__aenter__()
context_managers.append(cm)
server_streams.append((read_stream, write_stream))
# Establish stdio or sse communication for each server
if isinstance(server_params, StdioServerParameters):
cm = stdio_client(server_params)
(read_stream, write_stream) = await cm.__aenter__()
context_managers.append(cm)
server_streams.append((read_stream, write_stream))

init_result = await send_initialize(read_stream, write_stream)
if not init_result:
print(f"[red]Server initialization failed for {server_name}[/red]")
return
init_result = await send_initialize(read_stream, write_stream)
if not init_result:
print(f"[red]Server initialization failed for {server_name}[/red]")
return
elif isinstance(server_params, SSEServerParameters):
cm = sse_client(server_params.endpoint)
(read_stream, write_stream) = await cm.__aenter__()
context_managers.append(cm)
server_streams.append((read_stream, write_stream))

init_result = await send_initialize(read_stream, write_stream)
if not init_result:
print(f"[red]Server initialization failed for {server_name}[/red]")
return

else:
raise ValueError("Server transport not supported")

try:
if command:
@@ -340,23 +353,32 @@ def cli_main():

parser.add_argument(
"--provider",
choices=["openai", "ollama"],
choices=["openai", "ollama","amazon"],
default="openai",
help="LLM provider to use. Defaults to 'openai'.",
)

parser.add_argument(
"--model",
help=("Model to use. Defaults to 'gpt-4o-mini' for 'openai' and 'qwen2.5-coder' for 'ollama'."),
help=("Model to use. Defaults to 'gpt-4o-mini' for 'openai' and 'qwen2.5-coder' for 'ollama', 'Claude-3-5-sonnet' for 'amazon'."),
)

parser.add_argument(
"--aws-region",
default="us-east-1",
help=("AWS region to use. Defaults to 'us-east-1'."),
)

args = parser.parse_args()

model = args.model or (
"gpt-4o-mini" if args.provider == "openai" else "qwen2.5-coder"
"gpt-4o-mini" if args.provider == "openai"
else "claude-3.5-sonnet" if args.provider == "amazon"
else "qwen2.5-coder"
)
os.environ["LLM_PROVIDER"] = args.provider
os.environ["LLM_MODEL"] = model
os.environ["AWS_REGION"] = args.aws_region

try:
result = anyio.run(run, args.config_file, args.servers, args.command)
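The connection loop above enters each transport's context manager by hand and keeps the managers in `context_managers` for later teardown. As a sketch only (not part of this PR), the same bookkeeping can be expressed with `contextlib.AsyncExitStack`, which unwinds every entered context automatically even if a later connection fails:

```python
# Sketch of an AsyncExitStack variant of the loop above; assumes the same
# load_config, stdio_client, and sse_client helpers from this repository.
from contextlib import AsyncExitStack

async def connect_all(config_path, server_names):
    async with AsyncExitStack() as stack:
        server_streams = []
        for server_name in server_names:
            server_params = await load_config(config_path, server_name)
            if isinstance(server_params, StdioServerParameters):
                cm = stdio_client(server_params)
            elif isinstance(server_params, SSEServerParameters):
                cm = sse_client(server_params.endpoint)
            else:
                raise ValueError("Server transport not supported")
            # enter_async_context calls __aenter__ and registers __aexit__
            read_stream, write_stream = await stack.enter_async_context(cm)
            server_streams.append((read_stream, write_stream))
        ...  # use server_streams while the stack is still open
```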
72 changes: 69 additions & 3 deletions src/mcpcli/chat_handler.py
@@ -1,5 +1,6 @@
# chat_handler.py
import json
from datetime import datetime

from rich import print
from rich.markdown import Markdown
@@ -66,6 +67,74 @@ async def process_conversation(
response_content = completion.get("response", "No response")
tool_calls = completion.get("tool_calls", [])

# Save assistant response with additional metadata if provider is amazon
if client.provider == "amazon":
content = []
if response_content:
content.append({"text": response_content})

if tool_calls:
for tool_call in tool_calls:
if hasattr(tool_call, "function"):
tool_use = {
"toolUse": {
"toolUseId": tool_call.id if hasattr(tool_call, 'id') else None,
"name": tool_call.function.name.replace("-", "_"),
"input": json.loads(tool_call.function.arguments)
}
}
content.append(tool_use)
elif isinstance(tool_call, dict) and "function" in tool_call:
tool_use = {
"toolUse": {
"toolUseId": tool_call.get("id"),
"name": tool_call["function"]["name"].replace("-", "_"),
"input": json.loads(tool_call["function"]["arguments"]) if isinstance(tool_call["function"]["arguments"], str) else tool_call["function"]["arguments"]
}
}
content.append(tool_use)

assistant_message = {
"role": "assistant",
"content": content,
"metadata": {
"timestamp": datetime.now().isoformat(),
"conversation_id": completion.get("conversation_id", ""),
}
}
conversation_history.append(assistant_message)
else:
content = []
if response_content:
content.append({"text": response_content})

if tool_calls:
for tool_call in tool_calls:
if hasattr(tool_call, "function"):
tool_use = {
"toolUse": {
"toolUseId": f"tooluse_{datetime.now().strftime('%Y%m%d%H%M%S')}",
"name": tool_call.function.name.replace("-", "_"),
"input": json.loads(tool_call.function.arguments)
}
}
content.append(tool_use)
elif isinstance(tool_call, dict) and "function" in tool_call:
tool_use = {
"toolUse": {
"toolUseId": f"tooluse_{datetime.now().strftime('%Y%m%d%H%M%S')}",
"name": tool_call["function"]["name"].replace("-", "_"),
"input": json.loads(tool_call["function"]["arguments"]) if isinstance(tool_call["function"]["arguments"], str) else tool_call["function"]["arguments"]
}
}
content.append(tool_use)

assistant_message = {
"role": "assistant",
"content": content
}
conversation_history.append(assistant_message)

if tool_calls:
for tool_call in tool_calls:
# Extract tool_name and raw_arguments as before
@@ -85,10 +154,8 @@ async def process_conversation(
try:
raw_arguments = json.loads(raw_arguments)
except json.JSONDecodeError:
# If it's not valid JSON, just display as is
pass

# Now raw_arguments should be a dict or something we can pretty-print as JSON
tool_args_str = json.dumps(raw_arguments, indent=2)

tool_md = f"**Tool Call:** {tool_name}\n\n```json\n{tool_args_str}\n```"
Expand All @@ -106,7 +173,6 @@ async def process_conversation(
print(
Panel(Markdown(assistant_panel_text), style="bold blue", title="Assistant")
)
conversation_history.append({"role": "assistant", "content": response_content})
break


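For the `amazon` path, the branch above reshapes each tool call into a Bedrock Converse-style `toolUse` content block before appending it to the conversation history. An illustrative example of the resulting assistant message (all field values are made up):

```python
# Illustrative only; values are hypothetical.
assistant_message = {
    "role": "assistant",
    "content": [
        {"text": "Let me query the database."},
        {
            "toolUse": {
                "toolUseId": "tooluse_20250101120000",
                "name": "read_query",  # dashes in tool names replaced with underscores
                "input": {"query": "SELECT name FROM sqlite_master"},
            }
        },
    ],
    "metadata": {
        "timestamp": "2025-01-01T12:00:00",
        "conversation_id": "",
    },
}
```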
44 changes: 33 additions & 11 deletions src/mcpcli/config.py
@@ -2,10 +2,11 @@
import json
import logging

from mcpcli.transport.sse.sse_server_parameters import SSEServerParameters
from mcpcli.transport.stdio.stdio_server_parameters import StdioServerParameters


async def load_config(config_path: str, server_name: str) -> StdioServerParameters:
async def load_config(config_path: str, server_name: str) -> StdioServerParameters | SSEServerParameters:
"""Load the server configuration from a JSON file."""
try:
# debug
@@ -17,23 +18,44 @@ async def load_config(config_path: str, server_name: str) -> StdioServerParamete

# Retrieve the server configuration
server_config = config.get("mcpServers", {}).get(server_name)


if not server_config:
error_msg = f"Server '{server_name}' not found in configuration file."
logging.error(error_msg)
raise ValueError(error_msg)

if "transport" not in server_config:
if "command" in server_config:
server_config["transport"] = "stdio"
elif "endpoint" in server_config:
server_config["transport"] = "sse"
else:
error_msg = f"Server transport not found in configuration file."
logging.error(error_msg)
raise ValueError(error_msg)

# Construct the server parameters
result = StdioServerParameters(
command=server_config["command"],
args=server_config.get("args", []),
env=server_config.get("env"),
)

# debug
logging.debug(
f"Loaded config: command='{result.command}', args={result.args}, env={result.env}"
)
if server_config["transport"] == "stdio":
result = StdioServerParameters(
command=server_config["command"],
args=server_config.get("args", []),
env=server_config.get("env"),
)
# debug
logging.debug(
f"Loaded config: command='{result.command}', args={result.args}, env={result.env}"
)
elif server_config["transport"] == "sse":
result = SSEServerParameters(
endpoint=server_config["endpoint"],
)
else:
error_msg = f"Server transport '{server_config['transport']}' not supported."
logging.error(error_msg)
raise ValueError(error_msg)


return result
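
With the transport fallback above, both kinds of entries in `server_config.json` resolve to the matching parameter type. A quick usage sketch, assuming the configuration shown earlier in this diff:

```python
# Usage sketch for load_config with the server_config.json from this PR.
import anyio

from mcpcli.config import load_config

async def main():
    sqlite_params = await load_config("server_config.json", "sqlite")
    fetch_params = await load_config("server_config.json", "fetch")
    print(type(sqlite_params).__name__)  # StdioServerParameters
    print(type(fetch_params).__name__)   # SSEServerParameters

anyio.run(main)
```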
