2 changes: 1 addition & 1 deletion aider/__init__.py
@@ -1,6 +1,6 @@
from packaging import version

__version__ = "0.88.1.dev"
__version__ = "0.88.2.dev"
safe_version = __version__

try:
2 changes: 1 addition & 1 deletion aider/coders/architect_coder.py
@@ -14,7 +14,7 @@ async def reply_completed(self):
if not content or not content.strip():
return

-if not self.auto_accept_architect and not self.io.confirm_ask("Edit the files?"):
+if not self.auto_accept_architect and not await self.io.confirm_ask("Edit the files?"):
return

kwargs = dict()
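A note on the await changes in this file and in base_coder.py below: once confirm_ask is a coroutine, every call site has to await it. A bare self.io.confirm_ask(...) returns a coroutine object, which is always truthy, so a guard like `not self.io.confirm_ask("Edit the files?")` would silently evaluate to False and the prompt would never block the edit. A minimal standalone sketch of the pattern (dummy classes, not aider's real IO):

import asyncio

class DummyIO:
    async def confirm_ask(self, question):
        # Stand-in for aider's prompt; always answers "no" here.
        print(f"{question} [y/n] -> n")
        return False

async def reply_completed(io, auto_accept):
    # Correct: await the coroutine so the user's answer is actually honored.
    if not auto_accept and not await io.confirm_ask("Edit the files?"):
        return "skipped"
    return "edited"

print(asyncio.run(reply_completed(DummyIO(), auto_accept=False)))  # -> skipped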
20 changes: 11 additions & 9 deletions aider/coders/base_coder.py
@@ -616,9 +616,6 @@ def __init__(
except Exception as e:
self.io.tool_warning(f"Could not remove todo list file {todo_file_path}: {e}")

-# Instantiate MCP tools
-if self.mcp_servers:
-    pass
# validate the functions jsonschema
if self.functions:
from jsonschema import Draft7Validator
@@ -1755,7 +1752,7 @@ def warm_cache_worker():

return chunks

-def check_tokens(self, messages):
+async def check_tokens(self, messages):
"""Check if the messages will fit within the model's token limits."""
input_tokens = self.main_model.token_count(messages)
max_input_tokens = self.main_model.info.get("max_input_tokens") or 0
@@ -1774,7 +1771,7 @@ def check_tokens(self, messages):
" the context limit is exceeded."
)

if not self.io.confirm_ask("Try to proceed anyway?"):
if not await self.io.confirm_ask("Try to proceed anyway?"):
return False
return True

@@ -1792,7 +1789,7 @@ async def send_message(self, inp):
chunks = self.format_messages()
messages = chunks.all_messages()

-if not self.check_tokens(messages):
+if not await self.check_tokens(messages):
return
self.warm_cache(chunks)

@@ -2352,7 +2349,8 @@ async def get_server_tools(server):
)
return (server.name, server_tools)
except Exception as e:
self.io.tool_warning(f"Error initializing MCP server {server.name}:\n{e}")
if server.name != "unnamed-server":
self.io.tool_warning(f"Error initializing MCP server {server.name}:\n{e}")
return None

async def get_all_server_tools():
@@ -2604,7 +2602,7 @@ async def send(self, messages, model=None, functions=None, tools=None):
)
self.chat_completion_call_hashes.append(hash_object.hexdigest())

-if self.stream:
+if not isinstance(completion, ModelResponse):
async for chunk in self.show_send_output_stream(completion):
yield chunk
else:
@@ -2640,6 +2638,10 @@ def show_send_output(self, completion):
if self.verbose:
print(completion)

+if not isinstance(completion, ModelResponse):
+    self.io.tool_error(str(completion))
+    return

if not completion.choices:
self.io.tool_error(str(completion))
return
@@ -3092,7 +3094,7 @@ async def allowed_to_edit(self, path):
return

if not Path(full_path).exists():
if not self.io.confirm_ask("Create new file?", subject=path):
if not await self.io.confirm_ask("Create new file?", subject=path):
self.io.tool_output(f"Skipping edits to {path}")
return

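The isinstance(completion, ModelResponse) checks above replace the old `if self.stream:` flag: whether to stream is now decided by what the provider actually returned, and show_send_output bails out with an error for anything that is not a complete response. A rough sketch of the dispatch; the classes below are hypothetical stand-ins, not litellm's real types:

import asyncio

class ModelResponse:
    # Stand-in for litellm's ModelResponse (a finished, non-streaming completion).
    def __init__(self, text):
        self.text = text

async def fake_stream():
    # Stand-in for a provider's streaming chunk iterator.
    for piece in ("Hel", "lo"):
        yield piece

async def handle(completion):
    if not isinstance(completion, ModelResponse):
        # Anything that is not a finished response is treated as a chunk stream.
        async for chunk in completion:
            print("chunk:", chunk)
    else:
        print("full response:", completion.text)

asyncio.run(handle(ModelResponse("Hello")))
asyncio.run(handle(fake_stream()))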
2 changes: 1 addition & 1 deletion aider/io.py
@@ -1301,7 +1301,7 @@ def assistant_output(self, message, pretty=None):
else:
show_resp = Text(message or "(empty response)")

-self.stream_print(show_resp)
+self.console.print(show_resp)

def render_markdown(self, text):
output = StringIO()
17 changes: 17 additions & 0 deletions aider/mcp/__init__.py
@@ -154,4 +154,21 @@ def load_mcp_servers(mcp_servers, mcp_servers_file, io, verbose=False, mcp_trans
if mcp_servers_file:
servers = _parse_mcp_servers_from_file(mcp_servers_file, io, verbose, mcp_transport)

+if not servers:
+    # Workaround: always register a dummy default MCP server, even when none are
+    # configured. On coder switch the prompt area is initialized twice (once right
+    # after input for the old coder, then again for the new target coder), which
+    # creates a race where an awaited coroutine can no longer yield control. The
+    # root cause is not yet understood (it may be a prompt_toolkit issue), but
+    # running through the MCP server checks allows control to be yielded again,
+    # so this dummy server keeps the overall agentic loop working.
+    servers = [McpServer(json.loads('{"aider_default": {}}'))]

return servers
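For what it's worth, the fallback literal simply registers a single unconfigured server named aider_default; the json.loads round-trip produces the same dict that could be passed directly (McpServer's constructor is not shown in this diff, so the commented line is only an assumption):

import json

assert json.loads('{"aider_default": {}}') == {"aider_default": {}}
# presumably equivalent to:
# servers = [McpServer({"aider_default": {}})]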
1 change: 0 additions & 1 deletion pyproject.toml
@@ -22,7 +22,6 @@ dynamic = ["dependencies", "optional-dependencies", "version"]
Homepage = "https://github.com/dwash96/aider-ce"

[project.scripts]
aider = "aider.main:main"
aider-ce = "aider.main:main"

[tool.setuptools.dynamic]
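With the aider entry removed, only the aider-ce console command is installed. Each [project.scripts] entry maps a command name to a callable; the installer generates a wrapper roughly like the sketch below (illustrative only, not a file in this repo):

# Rough equivalent of the wrapper pip generates for aider-ce = "aider.main:main"
import sys
from aider.main import main

if __name__ == "__main__":
    sys.exit(main())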