Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion aider/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from packaging import version

__version__ = "0.88.20.dev"
__version__ = "0.88.21.dev"
safe_version = __version__

try:
Expand Down
13 changes: 9 additions & 4 deletions aider/coders/architect_coder.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from ..commands import SwitchCoder
from .architect_prompts import ArchitectPrompts
from .ask_coder import AskCoder
from .base_coder import Coder
Expand Down Expand Up @@ -41,8 +42,12 @@ async def reply_completed(self):
if self.verbose:
editor_coder.show_announcements()

await editor_coder.run(with_message=content, preproc=False)
try:
await editor_coder.generate(user_message=content, preproc=False)
self.move_back_cur_messages("I made those changes to the files.")
self.total_cost = editor_coder.total_cost
self.aider_commit_hashes = editor_coder.aider_commit_hashes
except Exception as e:
self.io.tool_error(e)

self.move_back_cur_messages("I made those changes to the files.")
self.total_cost = editor_coder.total_cost
self.aider_commit_hashes = editor_coder.aider_commit_hashes
raise SwitchCoder(main_model=self.main_model, edit_format="architect")
378 changes: 237 additions & 141 deletions aider/coders/base_coder.py

Large diffs are not rendered by default.

12 changes: 11 additions & 1 deletion aider/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -1498,6 +1498,9 @@ async def _generic_chat_command(self, args, edit_format, placeholder=None):

from aider.coders.base_coder import Coder

main_model = self.coder.main_model
edit_format = self.coder.edit_format

coder = await Coder.create(
io=self.io,
from_coder=self.coder,
Expand All @@ -1508,9 +1511,16 @@ async def _generic_chat_command(self, args, edit_format, placeholder=None):
)

user_msg = args
await coder.run(user_msg, False)
await coder.generate(user_message=user_msg, preproc=False)
self.coder.aider_commit_hashes = coder.aider_commit_hashes

raise SwitchCoder(
main_model=main_model,
edit_format=edit_format,
done_messages=coder.done_messages,
cur_messages=coder.cur_messages,
)

def get_help_md(self):
"Show help about all commands in markdown"

Expand Down
8 changes: 8 additions & 0 deletions aider/helpers/coroutines.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import asyncio # noqa: F401


def is_active(coroutine):
    """Return True when *coroutine* is a live task: present, not done, not cancelled."""
    finished = not coroutine or coroutine.done() or coroutine.cancelled()
    return not finished
36 changes: 36 additions & 0 deletions aider/helpers/requests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from ..sendchat import ensure_alternating_roles


def _backfill_thought_signature(call):
    # Ensure the call dict carries a placeholder thought signature so
    # Google-side validation does not reject replayed tool/function calls.
    if "provider_specific_fields" not in call:
        call["provider_specific_fields"] = {}
    if "thought_signature" not in call["provider_specific_fields"]:
        call["provider_specific_fields"]["thought_signature"] = (
            "skip_thought_signature_validator"
        )


def thought_signature(model, messages):
    """Backfill missing thought signatures for Vertex AI / Gemini models.

    Mutates *messages* in place and also returns it.  For any message
    containing ``tool_calls`` or a ``function_call``, a placeholder
    ``thought_signature`` is added under ``provider_specific_fields`` when
    one is not already present.  Messages for other providers pass through
    untouched.
    """
    # Only Vertex AI and Gemini model names require thought signatures.
    if model.name.startswith("vertex_ai/") or model.name.startswith("gemini/"):
        for msg in messages:
            if "tool_calls" in msg:
                for call in msg["tool_calls"]:
                    _backfill_thought_signature(call)

            if "function_call" in msg:
                _backfill_thought_signature(msg["function_call"])

    return messages


def model_request_parser(model, messages):
    """Normalize *messages* for *model*: backfill thought signatures, then
    enforce alternating roles, returning the adjusted message list."""
    return ensure_alternating_roles(thought_signature(model, messages))
35 changes: 26 additions & 9 deletions aider/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@
from rich.style import Style as RichStyle
from rich.text import Text

from aider.helpers import coroutines

from .dump import dump # noqa: F401
from .editor import pipe_editor
from .utils import is_image_file, run_fzf
Expand Down Expand Up @@ -695,7 +697,7 @@ def reject_outstanding_confirmations(self):
pass

async def recreate_input(self, future=None):
if not self.input_task or self.input_task.done() or self.input_task.cancelled():
if not coroutines.is_active(self.input_task):
coder = self.coder() if self.coder else None

if coder:
Expand Down Expand Up @@ -888,7 +890,6 @@ def get_continuation(width, line_number, is_soft_wrap):
except EOFError:
raise
except KeyboardInterrupt:
await self.cancel_output_task()
self.console.print()
return ""
except UnicodeEncodeError as err:
Expand Down Expand Up @@ -961,7 +962,14 @@ async def cancel_input_task(self):
try:
input_task.cancel()
await input_task
except (asyncio.CancelledError, EOFError, IndexError):
except (
asyncio.CancelledError,
Exception,
EOFError,
IndexError,
RuntimeError,
SystemExit,
):
pass

async def cancel_output_task(self):
Expand All @@ -971,9 +979,22 @@ async def cancel_output_task(self):
try:
output_task.cancel()
await output_task
except (asyncio.CancelledError, EOFError, IndexError):
except (
asyncio.CancelledError,
Exception,
EOFError,
IndexError,
RuntimeError,
SystemExit,
):
pass

async def cancel_task_streams(self):
input_task = asyncio.create_task(self.cancel_input_task())
output_task = asyncio.create_task(self.cancel_output_task())

await asyncio.wait([input_task, output_task], return_when=asyncio.ALL_COMPLETED)

def add_to_input_history(self, inp):
if not self.input_history_file:
return
Expand Down Expand Up @@ -1153,11 +1174,7 @@ async def _confirm_ask(
if self.prompt_session:
await self.recreate_input()

if (
self.input_task
and not self.input_task.done()
and not self.input_task.cancelled()
):
if coroutines.is_active(self.input_task):
self.prompt_session.message = question
self.prompt_session.app.invalidate()
else:
Expand Down
7 changes: 7 additions & 0 deletions aider/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -482,6 +482,13 @@ def expand_glob_patterns(patterns, root="."):


def custom_tracer(frame, event, arg):
import os

global log_file
if not log_file:
os.makedirs(".aider/logs/", exist_ok=True)
log_file = open(".aider/logs/debug.log", "w", buffering=1)

# Get the absolute path of the file where the code is executing
filename = os.path.abspath(frame.f_code.co_filename)

Expand Down
10 changes: 5 additions & 5 deletions aider/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,10 @@

from aider import __version__
from aider.dump import dump # noqa: F401
from aider.helpers.requests import model_request_parser
from aider.llm import litellm
from aider.openrouter import OpenRouterModelManager
from aider.sendchat import ensure_alternating_roles, sanity_check_messages
from aider.sendchat import sanity_check_messages
from aider.utils import check_pip_install_extra

RETRY_TIMEOUT = 60
Expand Down Expand Up @@ -438,7 +439,7 @@ def apply_generic_model_settings(self, model):
return # <--

last_segment = model.split("/")[-1]
if last_segment in ("gpt-5", "gpt-5-2025-08-07"):
if last_segment in ("gpt-5", "gpt-5-2025-08-07") or "gpt-5.1" in model:
self.use_temperature = False
self.edit_format = "diff"
if "reasoning_effort" not in self.accepts_settings:
Expand Down Expand Up @@ -909,7 +910,7 @@ async def send_completion(
if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
sanity_check_messages(messages)

messages = ensure_alternating_roles(messages)
messages = model_request_parser(self, messages)

if self.verbose:
for message in messages:
Expand Down Expand Up @@ -1000,8 +1001,7 @@ async def simple_send_with_retries(self, messages, max_tokens=None):
from aider.exceptions import LiteLLMExceptions

litellm_ex = LiteLLMExceptions()
if "deepseek-reasoner" in self.name:
messages = ensure_alternating_roles(messages)
messages = model_request_parser(self, messages)
retry_delay = 0.125

if self.verbose:
Expand Down
Loading
Loading