From 0521ec24d712a0ef3615b47767d807a5291dc36d Mon Sep 17 00:00:00 2001 From: Sanny Sanoff Date: Mon, 29 Dec 2025 20:51:20 +0100 Subject: [PATCH 01/11] feat: Add token processing and generation speed to usage report MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add --show-speed flag to enable speed display (disabled by default) - Track LLM response time (llm_elapsed) after streaming completes - Track time to first token (TtFT) for streaming responses - Calculate and display prompt processing speed (tokens/sec) - Calculate and display token generation speed (output tokens/sec) Output with --show-speed: LLM elapsed time: X.XX seconds (TtFT: X.XXs) Speed: XXX prompt tokens/sec, XXX output tokens/sec πŸ€– Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- aider/args.py | 6 +++ aider/coders/base_coder.py | 76 +++++++++++++++++++++++++++++++++++--- 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/aider/args.py b/aider/args.py index 41d883249ad..2625406839c 100644 --- a/aider/args.py +++ b/aider/args.py @@ -486,6 +486,12 @@ def get_parser(default_config_files, git_root): default="#0088ff", help="Set the color for assistant output (default: #0088ff)", ) + group.add_argument( + "--show-speed", + action="store_true", + help="Show token processing and generation speed in usage report (default: False)", + default=False, + ) group.add_argument( "--completion-menu-color", metavar="COLOR", diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index fa0a3f302bc..28500581949 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -2969,6 +2969,7 @@ async def send(self, messages, model=None, functions=None, tools=None): self.partial_response_function_call = dict() completion = None + llm_start_time = time.time() try: hash_object, completion = await model.send_completion( @@ -2982,19 +2983,21 @@ async def send(self, messages, model=None, functions=None, 
tools=None): self.chat_completion_call_hashes.append(hash_object.hexdigest()) if not isinstance(completion, ModelResponse): - async for chunk in self.show_send_output_stream(completion): + async for chunk in self.show_send_output_stream(completion, llm_start_time): yield chunk else: self.show_send_output(completion) - # Calculate costs for successful responses - self.calculate_and_show_tokens_and_cost(messages, completion) + # Calculate costs after streaming completes (llm_elapsed set by show_send_output_stream) + llm_elapsed = getattr(self, 'llm_elapsed', time.time() - llm_start_time) + self.calculate_and_show_tokens_and_cost(messages, completion, llm_elapsed) except LiteLLMExceptions().exceptions_tuple() as err: ex_info = LiteLLMExceptions().get_ex_info(err) if ex_info.name == "ContextWindowExceededError": # Still calculate costs for context window errors - self.calculate_and_show_tokens_and_cost(messages, completion) + llm_elapsed = time.time() - llm_start_time + self.calculate_and_show_tokens_and_cost(messages, completion, llm_elapsed) raise except KeyboardInterrupt as kbi: self.keyboard_interrupt() @@ -3056,8 +3059,10 @@ def show_send_output(self, completion): ): raise FinishReasonLength() - async def show_send_output_stream(self, completion): + async def show_send_output_stream(self, completion, llm_start_time=None): received_content = False + first_token_time = None + start_time = llm_start_time or time.time() async for chunk in completion: if self.args.debug: @@ -3084,6 +3089,8 @@ async def show_send_output_stream(self, completion): try: if chunk.choices[0].delta.tool_calls: received_content = True + if first_token_time is None: + first_token_time = time.time() for tool_call_chunk in chunk.choices[0].delta.tool_calls: self.tool_reflection = True @@ -3111,6 +3118,8 @@ async def show_send_output_stream(self, completion): self.io.update_spinner_suffix(v) received_content = True + if first_token_time is None: + first_token_time = time.time() except 
AttributeError: pass @@ -3130,6 +3139,8 @@ async def show_send_output_stream(self, completion): text += reasoning_content self.got_reasoning_content = True received_content = True + if first_token_time is None: + first_token_time = time.time() self.io.update_spinner_suffix(reasoning_content) self.partial_response_reasoning_content += reasoning_content @@ -3142,6 +3153,8 @@ async def show_send_output_stream(self, completion): text += content received_content = True + if first_token_time is None: + first_token_time = time.time() self.io.update_spinner_suffix(content) except AttributeError: pass @@ -3173,6 +3186,11 @@ async def show_send_output_stream(self, completion): if not received_content and len(self.partial_response_tool_calls) == 0: self.io.tool_warning("Empty response received from LLM. Check your provider account?") + # Set timing info for reporting (after streaming completes) + self.llm_elapsed = time.time() - start_time + if first_token_time is not None: + self.first_token_time = first_token_time - start_time + def consolidate_chunks(self): response = ( self.partial_response_chunks[0] @@ -3333,7 +3351,7 @@ def remove_reasoning_content(self): self.reasoning_tag_name, ) - def calculate_and_show_tokens_and_cost(self, messages, completion=None): + def calculate_and_show_tokens_and_cost(self, messages, completion=None, llm_elapsed=None): prompt_tokens = 0 completion_tokens = 0 cache_hit_tokens = 0 @@ -3372,6 +3390,8 @@ def calculate_and_show_tokens_and_cost(self, messages, completion=None): if not self.main_model.info.get("input_cost_per_token"): self.usage_report = tokens_report + # Still add speed info even without cost + self._add_speed_info(llm_elapsed) return try: @@ -3400,6 +3420,50 @@ def calculate_and_show_tokens_and_cost(self, messages, completion=None): self.usage_report = tokens_report + sep + cost_report + # Add LLM elapsed time and speed information + self._add_speed_info(llm_elapsed) + + def _add_speed_info(self, llm_elapsed): + """Add LLM elapsed 
time and speed information to usage report.""" + if llm_elapsed is None or self.usage_report is None: + return + + # Check if speed display is enabled + show_speed = getattr(self.args, 'show_speed', False) if self.args else False + + if not show_speed: + return + + time_report = f"\nLLM elapsed time: {llm_elapsed:.2f} seconds" + # Add time to first token if available + if hasattr(self, 'first_token_time'): + time_report += f" (TtFT: {self.first_token_time:.2f}s)" + + # Add processing and generation speeds if we have the data + if hasattr(self, 'message_tokens_sent') and hasattr(self, 'message_tokens_received'): + sent_tokens = self.message_tokens_sent + received_tokens = self.message_tokens_received + if sent_tokens > 0 and received_tokens > 0: + # Calculate prompt processing speed (tokens/sec) based on time to first token + if hasattr(self, 'first_token_time') and self.first_token_time > 0: + prompt_processing_speed = sent_tokens / self.first_token_time + time_report += f"\nSpeed: {prompt_processing_speed:.0f} prompt tokens/sec" + + # Calculate token generation speed based on time after first token + if hasattr(self, 'first_token_time') and self.first_token_time > 0: + generation_time = llm_elapsed - self.first_token_time + if generation_time > 0: + token_generation_speed = received_tokens / generation_time + time_report += f", {token_generation_speed:.0f} output tokens/sec" + else: + token_generation_speed = received_tokens / llm_elapsed + if hasattr(self, 'first_token_time') and self.first_token_time > 0: + time_report += f", {token_generation_speed:.0f} output tokens/sec" + else: + time_report += f"\nSpeed: {token_generation_speed:.0f} output tokens/sec" + + self.usage_report += time_report + def format_cost(self, value): if value == 0: return "0.00" From ae16e5b852db244757c967fe54e23343de7e09c4 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 09:05:26 -0500 Subject: [PATCH 02/11] Bump Version --- aider/__init__.py | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 18e3a333286..bbc69f55891 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.91.3.dev" +__version__ = "0.91.5.dev" safe_version = __version__ try: From f606d04f59babab15d4c20394354f594cadb3c34 Mon Sep 17 00:00:00 2001 From: Sanny Sanoff Date: Wed, 31 Dec 2025 16:11:24 +0100 Subject: [PATCH 03/11] minimize the complexity of coder changes. --- aider/coders/base_coder.py | 85 ++++--------------- aider/helpers/profiler.py | 162 +++++++++++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+), 70 deletions(-) create mode 100644 aider/helpers/profiler.py diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 28500581949..fbc41456cf3 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -39,6 +39,7 @@ from aider.commands import Commands, SwitchCoder from aider.exceptions import LiteLLMExceptions from aider.helpers import coroutines +from aider.helpers.profiler import TokenProfiler from aider.history import ChatSummary from aider.io import ConfirmGroup, InputOutput from aider.linter import Linter @@ -365,6 +366,7 @@ def __init__( self.message_tokens_sent = 0 self.message_tokens_received = 0 + self.profiler = TokenProfiler(enable_printing=getattr(args, 'show_speed', False) if args else False) self.verbose = verbose self.abs_fnames = set() self.abs_read_only_fnames = set() @@ -2969,7 +2971,7 @@ async def send(self, messages, model=None, functions=None, tools=None): self.partial_response_function_call = dict() completion = None - llm_start_time = time.time() + self.profiler.start() try: hash_object, completion = await model.send_completion( @@ -2983,21 +2985,20 @@ async def send(self, messages, model=None, functions=None, tools=None): self.chat_completion_call_hashes.append(hash_object.hexdigest()) if not isinstance(completion, ModelResponse): - async for chunk in 
self.show_send_output_stream(completion, llm_start_time): + async for chunk in self.show_send_output_stream(completion): yield chunk else: self.show_send_output(completion) - # Calculate costs after streaming completes (llm_elapsed set by show_send_output_stream) - llm_elapsed = getattr(self, 'llm_elapsed', time.time() - llm_start_time) - self.calculate_and_show_tokens_and_cost(messages, completion, llm_elapsed) + # Calculate costs for successful responses + self.calculate_and_show_tokens_and_cost(messages, completion) except LiteLLMExceptions().exceptions_tuple() as err: ex_info = LiteLLMExceptions().get_ex_info(err) if ex_info.name == "ContextWindowExceededError": # Still calculate costs for context window errors - llm_elapsed = time.time() - llm_start_time - self.calculate_and_show_tokens_and_cost(messages, completion, llm_elapsed) + self.profiler.on_error() + self.calculate_and_show_tokens_and_cost(messages, completion) raise except KeyboardInterrupt as kbi: self.keyboard_interrupt() @@ -3059,10 +3060,8 @@ def show_send_output(self, completion): ): raise FinishReasonLength() - async def show_send_output_stream(self, completion, llm_start_time=None): + async def show_send_output_stream(self, completion): received_content = False - first_token_time = None - start_time = llm_start_time or time.time() async for chunk in completion: if self.args.debug: @@ -3089,8 +3088,7 @@ async def show_send_output_stream(self, completion, llm_start_time=None): try: if chunk.choices[0].delta.tool_calls: received_content = True - if first_token_time is None: - first_token_time = time.time() + self.profiler.on_token() for tool_call_chunk in chunk.choices[0].delta.tool_calls: self.tool_reflection = True @@ -3118,8 +3116,7 @@ async def show_send_output_stream(self, completion, llm_start_time=None): self.io.update_spinner_suffix(v) received_content = True - if first_token_time is None: - first_token_time = time.time() + self.profiler.on_token() except AttributeError: pass @@ -3139,8 
+3136,7 @@ async def show_send_output_stream(self, completion, llm_start_time=None): text += reasoning_content self.got_reasoning_content = True received_content = True - if first_token_time is None: - first_token_time = time.time() + self.profiler.on_token() self.io.update_spinner_suffix(reasoning_content) self.partial_response_reasoning_content += reasoning_content @@ -3153,8 +3149,7 @@ async def show_send_output_stream(self, completion, llm_start_time=None): text += content received_content = True - if first_token_time is None: - first_token_time = time.time() + self.profiler.on_token() self.io.update_spinner_suffix(content) except AttributeError: pass @@ -3186,11 +3181,6 @@ async def show_send_output_stream(self, completion, llm_start_time=None): if not received_content and len(self.partial_response_tool_calls) == 0: self.io.tool_warning("Empty response received from LLM. Check your provider account?") - # Set timing info for reporting (after streaming completes) - self.llm_elapsed = time.time() - start_time - if first_token_time is not None: - self.first_token_time = first_token_time - start_time - def consolidate_chunks(self): response = ( self.partial_response_chunks[0] @@ -3351,7 +3341,7 @@ def remove_reasoning_content(self): self.reasoning_tag_name, ) - def calculate_and_show_tokens_and_cost(self, messages, completion=None, llm_elapsed=None): + def calculate_and_show_tokens_and_cost(self, messages, completion=None): prompt_tokens = 0 completion_tokens = 0 cache_hit_tokens = 0 @@ -3387,11 +3377,10 @@ def calculate_and_show_tokens_and_cost(self, messages, completion=None, llm_elap if cache_hit_tokens: tokens_report += f", {format_tokens(cache_hit_tokens)} cache hit" tokens_report += f", {format_tokens(self.message_tokens_received)} received." 
+ tokens_report = self.profiler.add_to_usage_report(tokens_report, self.message_tokens_sent, self.message_tokens_received) if not self.main_model.info.get("input_cost_per_token"): self.usage_report = tokens_report - # Still add speed info even without cost - self._add_speed_info(llm_elapsed) return try: @@ -3420,50 +3409,6 @@ def calculate_and_show_tokens_and_cost(self, messages, completion=None, llm_elap self.usage_report = tokens_report + sep + cost_report - # Add LLM elapsed time and speed information - self._add_speed_info(llm_elapsed) - - def _add_speed_info(self, llm_elapsed): - """Add LLM elapsed time and speed information to usage report.""" - if llm_elapsed is None or self.usage_report is None: - return - - # Check if speed display is enabled - show_speed = getattr(self.args, 'show_speed', False) if self.args else False - - if not show_speed: - return - - time_report = f"\nLLM elapsed time: {llm_elapsed:.2f} seconds" - # Add time to first token if available - if hasattr(self, 'first_token_time'): - time_report += f" (TtFT: {self.first_token_time:.2f}s)" - - # Add processing and generation speeds if we have the data - if hasattr(self, 'message_tokens_sent') and hasattr(self, 'message_tokens_received'): - sent_tokens = self.message_tokens_sent - received_tokens = self.message_tokens_received - if sent_tokens > 0 and received_tokens > 0: - # Calculate prompt processing speed (tokens/sec) based on time to first token - if hasattr(self, 'first_token_time') and self.first_token_time > 0: - prompt_processing_speed = sent_tokens / self.first_token_time - time_report += f"\nSpeed: {prompt_processing_speed:.0f} prompt tokens/sec" - - # Calculate token generation speed based on time after first token - if hasattr(self, 'first_token_time') and self.first_token_time > 0: - generation_time = llm_elapsed - self.first_token_time - if generation_time > 0: - token_generation_speed = received_tokens / generation_time - time_report += f", {token_generation_speed:.0f} output 
tokens/sec" - else: - token_generation_speed = received_tokens / llm_elapsed - if hasattr(self, 'first_token_time') and self.first_token_time > 0: - time_report += f", {token_generation_speed:.0f} output tokens/sec" - else: - time_report += f"\nSpeed: {token_generation_speed:.0f} output tokens/sec" - - self.usage_report += time_report - def format_cost(self, value): if value == 0: return "0.00" diff --git a/aider/helpers/profiler.py b/aider/helpers/profiler.py new file mode 100644 index 00000000000..ddd62f8e574 --- /dev/null +++ b/aider/helpers/profiler.py @@ -0,0 +1,162 @@ +"""Token profiler for tracking and reporting LLM token timing metrics.""" + +import time +from typing import Optional + + +class TokenProfiler: + """ + A profiler for tracking LLM token timing metrics with minimal interface. + + Handles all timing logic internally - just need to: + 1. Create with enable_printing flag + 2. Call start() when starting LLM request + 3. Call on_token() for each token received (auto-detects first token) + 4. Call set_token_counts() with input/output token counts + 5. Call get_report() to get formatted report (only if enabled) + 6. Call on_error() for error cases + """ + + def __init__(self, enable_printing: bool = False): + """ + Initialize the token profiler. + + Args: + enable_printing: If True, generate reports when get_report() is called + """ + self._enabled = enable_printing + self._start_time: Optional[float] = None + self._first_token_time: Optional[float] = None + self._end_time: Optional[float] = None + self._input_tokens: int = 0 + self._output_tokens: int = 0 + self._has_seen_first_token: bool = False + + def start(self) -> None: + """Start timing an LLM request.""" + self._start_time = time.time() + self._first_token_time = None + self._end_time = None + self._has_seen_first_token = False + self._input_tokens = 0 + self._output_tokens = 0 + + def on_token(self) -> None: + """ + Record that a token was received. + Auto-detects if this is the first token. 
+ """ + if not self._enabled or not self._start_time: + return + + if not self._has_seen_first_token: + self._first_token_time = time.time() + self._has_seen_first_token = True + + def set_token_counts(self, input_tokens: int, output_tokens: int) -> None: + """ + Set the token counts for the request. + + Args: + input_tokens: Number of input/prompt tokens + output_tokens: Number of output/generated tokens + """ + if not self._enabled: + return + + self._input_tokens = input_tokens + self._output_tokens = output_tokens + + def on_error(self) -> None: + """Handle error case - finalize timing.""" + if not self._enabled or not self._start_time: + return + + if self._end_time is None: + self._end_time = time.time() + + def get_report(self) -> Optional[str]: + """ + Get the formatted speed report (only if enabled). + + Returns: + Formatted report string, or None if disabled or no data + """ + if not self._enabled or not self._start_time: + return None + + # Calculate elapsed time + if self._end_time is not None: + elapsed = self._end_time - self._start_time + else: + elapsed = time.time() - self._start_time + + # Build the time report + report = f"\nLLM elapsed time: {elapsed:.2f} seconds" + + # Add time to first token if available + if self._first_token_time is not None: + ttft = self._first_token_time - self._start_time + report += f" (TtFT: {ttft:.2f}s)" + + # Add speed information if we have token data + if self._input_tokens > 0 and self._output_tokens > 0: + speed_parts = [] + + # Prompt processing speed (based on time to first token) + if ttft > 0: + prompt_speed = self._input_tokens / ttft + speed_parts.append(f"{prompt_speed:.0f} prompt tokens/sec") + + # Token generation speed (based on time after first token) + generation_time = elapsed - ttft + if generation_time > 0: + generation_speed = self._output_tokens / generation_time + speed_parts.append(f"{generation_speed:.0f} output tokens/sec") + + if speed_parts: + report += "\nSpeed: " + ", ".join(speed_parts) 
+ + return report + + def get_elapsed(self) -> Optional[float]: + """ + Get the elapsed time for the current request. + + Returns: + Elapsed time in seconds, or None if not started + """ + if not self._start_time: + return None + + if self._end_time is not None: + return self._end_time - self._start_time + + return time.time() - self._start_time + + def add_to_usage_report(self, usage_report: Optional[str], input_tokens: int = 0, output_tokens: int = 0) -> str: + """ + Add speed report to usage_report and return the combined string. + + Args: + usage_report: The existing usage report string + input_tokens: Number of input/prompt tokens (optional, updates if provided) + output_tokens: Number of output/generated tokens (optional, updates if provided) + + Returns: + The usage report with speed info appended (if enabled), or original if disabled + """ + if not usage_report: + return usage_report + + # Update token counts if provided + if input_tokens > 0 or output_tokens > 0: + self.set_token_counts(input_tokens, output_tokens) + + speed_report = self.get_report() + if speed_report: + return usage_report + speed_report + + return usage_report + + From a27fcf73af25ae6a64c7c414ccd587fa1e315a07 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 13:19:27 -0500 Subject: [PATCH 04/11] #335, #339: cmd_web no longer exists, update to use do_run() --- aider/coders/base_coder.py | 2 +- aider/commands.py | 26 ++++++++++++++------------ tests/scrape/test_scrape.py | 2 +- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 51b04807273..6aad6b43bf0 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1652,7 +1652,7 @@ async def check_for_urls(self, inp: str) -> List[str]: "Add URL to the chat?", subject=url, group=group, allow_never=True ): inp += "\n\n" - inp += await self.commands.cmd_web(url, return_content=True) + inp += await self.commands.do_run("web", url, 
return_content=True) else: self.rejected_urls.add(url) diff --git a/aider/commands.py b/aider/commands.py index 7cb8811e4a3..603b2142b08 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -105,7 +105,7 @@ def get_commands(self): commands = [f"/{cmd}" for cmd in registry_commands] return sorted(commands) - async def do_run(self, cmd_name, args): + async def do_run(self, cmd_name, args, **kwargs): # Execute command using registry command_class = CommandRegistry.get_command(cmd_name) if not command_class: @@ -115,17 +115,19 @@ async def do_run(self, cmd_name, args): self.cmd_running_event.clear() # Command is running try: # Generate a spreadable kwargs dict with all relevant Commands attributes - kwargs = { - "original_read_only_fnames": self.original_read_only_fnames, - "voice_language": self.voice_language, - "voice_format": self.voice_format, - "voice_input_device": self.voice_input_device, - "verify_ssl": self.verify_ssl, - "parser": self.parser, - "verbose": self.verbose, - "editor": self.editor, - "system_args": self.args, - } + kwargs.update( + { + "original_read_only_fnames": self.original_read_only_fnames, + "voice_language": self.voice_language, + "voice_format": self.voice_format, + "voice_input_device": self.voice_input_device, + "verify_ssl": self.verify_ssl, + "parser": self.parser, + "verbose": self.verbose, + "editor": self.editor, + "system_args": self.args, + } + ) return await CommandRegistry.execute( cmd_name, diff --git a/tests/scrape/test_scrape.py b/tests/scrape/test_scrape.py index 8eadf92d60f..84c84c6d7b5 100644 --- a/tests/scrape/test_scrape.py +++ b/tests/scrape/test_scrape.py @@ -58,7 +58,7 @@ def mock_install(*args, **kwargs): try: # Run the cmd_web command - result = await self.commands.cmd_web("https://example.com", return_content=True) + result = await self.commands.do_run("web", "https://example.com", return_content=True) # Assert that the result contains some content self.assertIsNotNone(result) From 
636a5f9914b7b7524a60325d3bec9d18cd5c9441 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 15:03:35 -0500 Subject: [PATCH 05/11] Unify context management commands into a single `ContextManager` tool, address #306 --- aider/coders/agent_coder.py | 81 +-------------- aider/prompts/agent.yml | 6 +- aider/tools/__init__.py | 10 +- aider/tools/context_manager.py | 175 +++++++++++++++++++++++++++++++++ aider/tools/extract_lines.py | 164 ++++++++++++------------------ aider/tools/make_editable.py | 69 ------------- aider/tools/make_readonly.py | 53 ---------- aider/tools/remove.py | 75 -------------- aider/tools/utils/helpers.py | 8 +- aider/tools/view.py | 41 -------- 10 files changed, 255 insertions(+), 427 deletions(-) create mode 100644 aider/tools/context_manager.py delete mode 100644 aider/tools/make_editable.py delete mode 100644 aider/tools/make_readonly.py delete mode 100644 aider/tools/remove.py delete mode 100644 aider/tools/view.py diff --git a/aider/coders/agent_coder.py b/aider/coders/agent_coder.py index 03da769386b..6f91ec1d5b6 100644 --- a/aider/coders/agent_coder.py +++ b/aider/coders/agent_coder.py @@ -34,43 +34,7 @@ from aider.repo import ANY_GIT_ERROR # Import tool modules for registry -# Import tool modules for registry -from aider.tools import ( - command, - command_interactive, - delete_block, - delete_line, - delete_lines, - extract_lines, - finished, - git_branch, - git_diff, - git_log, - git_remote, - git_show, - git_status, - grep, - indent_lines, - insert_block, - list_changes, - load_skill, - ls, - make_editable, - make_readonly, - remove, - remove_skill, - replace_all, - replace_line, - replace_lines, - replace_text, - show_numbered_context, - thinking, - undo_change, - update_todo_list, - view, - view_files_matching, - view_files_with_symbol, -) +from aider.tools import TOOL_MODULES from .base_coder import ChatChunks, Coder from .editblock_coder import do_replace, find_original_update_blocks, find_similar_lines @@ 
-178,42 +142,7 @@ def _build_tool_registry(self): registry = {} # Add tools that have been imported - tool_modules = [ - command, - command_interactive, - delete_block, - delete_line, - delete_lines, - extract_lines, - finished, - git_branch, - git_diff, - git_log, - git_remote, - git_show, - git_status, - grep, - indent_lines, - insert_block, - list_changes, - load_skill, - ls, - make_editable, - make_readonly, - remove, - remove_skill, - replace_all, - replace_line, - replace_lines, - replace_text, - show_numbered_context, - thinking, - undo_change, - update_todo_list, - view, - view_files_matching, - view_files_with_symbol, - ] + tool_modules = TOOL_MODULES # Process agent configuration if provided agent_config = self._get_agent_config() @@ -229,7 +158,7 @@ def _build_tool_registry(self): tools_excludelist.append("removeskill") # Always include essential tools regardless of includelist/excludelist - essential_tools = {"makeeditable", "replacetext", "view", "finished"} + essential_tools = {"contextmanager", "replacetext", "finished"} for module in tool_modules: if hasattr(module, "Tool"): tool_class = module.Tool @@ -1069,8 +998,8 @@ def get_context_summary(self): percentage = (total_tokens / max_input_tokens) * 100 result += f" ({percentage:.1f}% of limit)" if percentage > 80: - result += "\n\n⚠️ **Context is getting full!** Remove non-essential files via:\n" - result += '- `[tool_call(Remove, file_path="path/to/large_file.ext")]`\n' + result += "\n\n⚠️ **Context is getting full!**\n" + result += "- Remove non-essential files via the `ContextManager` tool.\n" result += "- Keep only essential files in context for best performance" result += "\n" diff --git a/aider/prompts/agent.yml b/aider/prompts/agent.yml index 7429525e4be..303f65fc7b4 100644 --- a/aider/prompts/agent.yml +++ b/aider/prompts/agent.yml @@ -20,7 +20,7 @@ main_system: | ## Core Directives - **Role**: Act as an expert software engineer. 
- - **Act Proactively**: Autonomously use file discovery and context management tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `View`, `Remove`) to gather information and fulfill the user's request. Chain tool calls across multiple turns to continue exploration. + - **Act Proactively**: Autonomously use file discovery and context management tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `ContextManager`) to gather information and fulfill the user's request. Chain tool calls across multiple turns to continue exploration. - **Be Decisive**: Trust that your initial findings are valid. Refrain from asking the same question or searching for the same term in multiple similar ways. - **Be Concise**: Keep all responses brief and direct (1-3 sentences). Avoid preamble, postamble, and unnecessary explanations. Do not repeat yourself. - **Be Careful**: Break updates down into smaller, more manageable chunks. Focus on one thing at a time. @@ -30,7 +30,7 @@ main_system: | 1. **Plan**: Determine the necessary changes. Use the `UpdateTodoList` tool to manage your plan. Always begin by updating the todo list. 2. **Explore**: Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `Grep`) to find relevant files. These tools add files to context as read-only. Use `Grep` first for broad searches to avoid context clutter. Concisely describe your search strategy with the `Thinking` tool. 3. **Think**: Given the contents of your exploration, concisely reason through the edits with the `Thinking` tool that need to be made to accomplish the goal. For complex edits, briefly outline your plan for the user. - 4. **Execute**: Use the appropriate editing tool. Remember to use `MakeEditable` on a file before modifying it. Break large edits (those greater than ~100 lines) into multiple smaller steps. Proactively use skills if they are available + 4. **Execute**: Use the appropriate editing tool. Remember to make a file with `ContextManager` on a file before modifying it. 
Break large edits (those greater than ~100 lines) into multiple smaller steps. Proactively use skills if they are available 5. **Verify & Recover**: After every edit, check the resulting diff snippet. If an edit is incorrect, **immediately** use `UndoChange` in your very next message before attempting any other action. 6. **Finished**: Use the `Finished` tool when all tasks and changes needed to accomplish the goal are finished ## Todo List Management @@ -60,7 +60,7 @@ system_reminder: | - Stay on task. Do not pursue goals the user did not ask for. - Any tool call automatically continues to the next turn. Provide no tool calls in your final answer. - Use context blocks (directory structure, git status) to orient yourself. - - Remove files from the context when you are done with viewing/editing with the `Remove` tool. It is fine to re-add them later, if they are needed again + - Remove files from the context when you no longer need them with the `ContextManager` tool. It is fine to re-add them later, if they are needed again - Remove skills if they are not helpful for your current task with `RemoveSkill` {lazy_prompt} {shell_cmd_reminder} diff --git a/aider/tools/__init__.py b/aider/tools/__init__.py index 03a5f8c2b45..1ca38ddbf2d 100644 --- a/aider/tools/__init__.py +++ b/aider/tools/__init__.py @@ -5,6 +5,7 @@ from . 
import ( command, command_interactive, + context_manager, delete_block, delete_line, delete_lines, @@ -21,9 +22,6 @@ insert_block, list_changes, ls, - make_editable, - make_readonly, - remove, replace_all, replace_line, replace_lines, @@ -32,7 +30,6 @@ thinking, undo_change, update_todo_list, - view, view_files_matching, view_files_with_symbol, ) @@ -41,6 +38,7 @@ TOOL_MODULES = [ command, command_interactive, + context_manager, delete_block, delete_line, delete_lines, @@ -57,9 +55,6 @@ insert_block, list_changes, ls, - make_editable, - make_readonly, - remove, replace_all, replace_line, replace_lines, @@ -68,7 +63,6 @@ thinking, undo_change, update_todo_list, - view, view_files_matching, view_files_with_symbol, ] diff --git a/aider/tools/context_manager.py b/aider/tools/context_manager.py new file mode 100644 index 00000000000..740b91ed785 --- /dev/null +++ b/aider/tools/context_manager.py @@ -0,0 +1,175 @@ +import os +import time + +from aider.tools.utils.base_tool import BaseTool +from aider.tools.utils.helpers import ToolError + + +class Tool(BaseTool): + NORM_NAME = "contextmanager" + SCHEMA = { + "type": "function", + "function": { + "name": "ContextManager", + "description": ( + "Manage multiple files in the chat context: remove, editable, view, and create." + " Accepts arrays of file paths for each operation." + ), + "parameters": { + "type": "object", + "properties": { + "remove": { + "type": "array", + "items": {"type": "string"}, + "description": "List of file paths to remove from context.", + }, + "editable": { + "type": "array", + "items": {"type": "string"}, + "description": ( + "List of file paths to make editable. Limit to at most 2 at a time." + ), + }, + "view": { + "type": "array", + "items": {"type": "string"}, + "description": ( + "List of file paths to view (add as read-only). Limit to at most 2 at a" + " time." 
+ ), + }, + "create": { + "type": "array", + "items": {"type": "string"}, + "description": "List of file paths to create.", + }, + }, + "additionalProperties": False, + "required": [], + }, + }, + } + + @classmethod + def execute(cls, coder, remove=None, editable=None, view=None, create=None): + """Perform batch operations on the coder's context. + + Parameters + ---------- + coder: Coder instance + The active coder handling file context. + remove: list[str] | None + Files to remove from the context. + editable: list[str] | None + Files to promote to editable status. + view: list[str] | None + Files to add as read-only view. + create: list[str] | None + Files to create and make editable. + """ + remove_files = remove or [] + editable_files = editable or [] + view_files = view or [] + create_files = create or [] + + if not remove_files and not editable_files and not view_files and not create_files: + raise ToolError("You must specify at least one of: remove, editable, view, or create") + + messages = [] + + for f in create_files: + messages.append(cls._create(coder, f)) + for f in remove_files: + messages.append(cls._remove(coder, f)) + for f in view_files: + messages.append(cls._view(coder, f)) + for f in editable_files: + messages.append(cls._editable(coder, f)) + + return "\n".join(messages) + + @staticmethod + def _remove(coder, file_path): + """Remove a file from the coder's context.""" + try: + abs_path = coder.abs_root_path(file_path) + rel_path = coder.get_rel_fname(abs_path) + removed = False + if abs_path in coder.abs_fnames: + coder.abs_fnames.remove(abs_path) + removed = True + elif abs_path in coder.abs_read_only_fnames: + coder.abs_read_only_fnames.remove(abs_path) + removed = True + if not removed: + coder.io.tool_output(f"⚠️ File '{file_path}' not in context") + return f"File not in context: {file_path}" + coder.recently_removed[rel_path] = {"removed_at": time.time()} + coder.io.tool_output(f"πŸ—‘οΈ Removed '{file_path}' from context") + return 
f"Removed: {file_path}" + except Exception as e: + coder.io.tool_error(f"Error removing file '{file_path}': {str(e)}") + return f"Error removing {file_path}: {e}" + + @staticmethod + def _editable(coder, file_path): + """Make a file editable in the coder's context.""" + try: + abs_path = coder.abs_root_path(file_path) + if abs_path in coder.abs_fnames: + coder.io.tool_output(f"πŸ“ File '{file_path}' is already editable") + return f"Already editable: {file_path}" + if not os.path.isfile(abs_path): + coder.io.tool_output(f"⚠️ File '{file_path}' not found on disk") + return f"File not found: {file_path}" + was_read_only = False + if abs_path in coder.abs_read_only_fnames: + coder.abs_read_only_fnames.remove(abs_path) + was_read_only = True + coder.abs_fnames.add(abs_path) + if was_read_only: + coder.io.tool_output(f"πŸ“ Moved '{file_path}' from read-only to editable") + return f"Made editable (moved): {file_path}" + else: + coder.io.tool_output(f"πŸ“ Added '{file_path}' directly to editable context") + return f"Made editable (added): {file_path}" + except Exception as e: + coder.io.tool_error(f"Error making editable '{file_path}': {str(e)}") + return f"Error making editable {file_path}: {e}" + + @staticmethod + def _view(coder, file_path): + """View a file (add as read-only) in the coder's context.""" + try: + return coder._add_file_to_context(file_path, explicit=True) + except Exception as e: + coder.io.tool_error(f"Error viewing file '{file_path}': {str(e)}") + return f"Error viewing {file_path}: {e}" + + @staticmethod + def _create(coder, file_path): + """Create a new file on the file system and make it editable in the coder's context.""" + try: + abs_path = coder.abs_root_path(file_path) + + # Check if file already exists + if os.path.exists(abs_path): + coder.io.tool_output(f"⚠️ File '{file_path}' already exists") + return f"File already exists: {file_path}" + + # Create parent directories if they don't exist + os.makedirs(os.path.dirname(abs_path), exist_ok=True) 
+ + # Create an empty file + with open(abs_path, "w", encoding="utf-8"): + pass + + # Add the file to editable context + coder.abs_fnames.add(abs_path) + + coder.io.tool_output(f"πŸ“ Created '{file_path}' and made it editable") + return f"Created and made editable: {file_path}" + + except Exception as e: + coder.io.tool_error(f"Error creating file '{file_path}': {str(e)}") + return f"Error creating {file_path}: {e}" diff --git a/aider/tools/extract_lines.py b/aider/tools/extract_lines.py index 84d35e82b5b..2d2ec7fa71a 100644 --- a/aider/tools/extract_lines.py +++ b/aider/tools/extract_lines.py @@ -1,8 +1,13 @@ import os -import traceback from aider.tools.utils.base_tool import BaseTool -from aider.tools.utils.helpers import generate_unified_diff_snippet +from aider.tools.utils.helpers import ( + ToolError, + apply_change, + generate_unified_diff_snippet, + handle_tool_error, + validate_file_for_edit, +) class Tool(BaseTool): @@ -58,55 +63,28 @@ def execute( Returns a result message. """ + tool_name = "ExtractLines" try: # --- Validate Source File --- - abs_source_path = coder.abs_root_path(source_file_path) - rel_source_path = coder.get_rel_fname(abs_source_path) - - if not os.path.isfile(abs_source_path): - coder.io.tool_error(f"Source file '{source_file_path}' not found") - return "Error: Source file not found" - - if abs_source_path not in coder.abs_fnames: - if abs_source_path in coder.abs_read_only_fnames: - coder.io.tool_error( - f"Source file '{source_file_path}' is read-only. Use MakeEditable first." - ) - return "Error: Source file is read-only. Use MakeEditable first." 
- else: - coder.io.tool_error(f"Source file '{source_file_path}' not in context") - return "Error: Source file not in context" + abs_source_path, rel_source_path, source_content = validate_file_for_edit( + coder, source_file_path + ) # --- Validate Target File --- abs_target_path = coder.abs_root_path(target_file_path) rel_target_path = coder.get_rel_fname(abs_target_path) target_exists = os.path.isfile(abs_target_path) - target_is_editable = abs_target_path in coder.abs_fnames - target_is_readonly = abs_target_path in coder.abs_read_only_fnames - - if target_exists and not target_is_editable: - if target_is_readonly: - coder.io.tool_error( - f"Target file '{target_file_path}' exists but is read-only. Use" - " MakeEditable first." - ) - return "Error: Target file exists but is read-only. Use MakeEditable first." - else: - # This case shouldn't happen if file exists, but handle defensively - coder.io.tool_error( - f"Target file '{target_file_path}' exists but is not in context. Add it" - " first." - ) - return "Error: Target file exists but is not in context." - # --- Read Source Content --- - source_content = coder.io.read_text(abs_source_path) - if source_content is None: - coder.io.tool_error( - f"Could not read source file '{source_file_path}' before ExtractLines" - " operation." 
- ) - return f"Error: Could not read source file '{source_file_path}'" + if target_exists: + # If target exists, validate it for editing + try: + _, _, target_content = validate_file_for_edit(coder, target_file_path) + except ToolError as e: + coder.io.tool_error(f"Target file validation failed: {str(e)}") + return f"Error: {str(e)}" + else: + # Target doesn't exist, start with empty content + target_content = "" # --- Find Extraction Range --- if end_pattern and line_count: @@ -114,7 +92,6 @@ def execute( return "Error: Cannot specify both end_pattern and line_count" source_lines = source_content.splitlines() - original_source_content = source_content start_pattern_line_indices = [] for i, line in enumerate(source_lines): @@ -197,16 +174,6 @@ def execute( new_source_lines = source_lines[:start_line] + source_lines[end_line + 1 :] new_source_content = "\n".join(new_source_lines) - target_content = "" - if target_exists: - target_content = coder.io.read_text(abs_target_path) - if target_content is None: - coder.io.tool_error( - f"Could not read existing target file '{target_file_path}'." 
- ) - return f"Error: Could not read target file '{target_file_path}'" - original_target_content = target_content # For tracking - # Append extracted lines to target content, ensuring a newline if target wasn't empty extracted_block = "\n".join(extracted_lines) if target_content and not target_content.endswith("\n"): @@ -215,11 +182,11 @@ def execute( # --- Generate Diffs --- source_diff_snippet = generate_unified_diff_snippet( - original_source_content, new_source_content, rel_source_path + source_content, new_source_content, rel_source_path ) target_insertion_line = len(target_content.splitlines()) if target_content else 0 target_diff_snippet = generate_unified_diff_snippet( - original_target_content, new_target_content, rel_target_path + target_content, new_target_content, rel_target_path ) # --- Handle Dry Run --- @@ -240,49 +207,43 @@ def execute( ) # --- Apply Changes (Not Dry Run) --- - coder.io.write_text(abs_source_path, new_source_content) - coder.io.write_text(abs_target_path, new_target_content) - - # --- Track Changes --- - source_change_id = "TRACKING_FAILED" - target_change_id = "TRACKING_FAILED" - try: - source_metadata = { - "start_line": start_line + 1, - "end_line": end_line + 1, - "start_pattern": start_pattern, - "end_pattern": end_pattern, - "line_count": line_count, - "near_context": near_context, - "occurrence": occurrence, - "extracted_content": extracted_block, - "target_file": rel_target_path, - } - source_change_id = coder.change_tracker.track_change( - file_path=rel_source_path, - change_type="extractlines_source", - original_content=original_source_content, - new_content=new_source_content, - metadata=source_metadata, - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking source change for ExtractLines: {track_e}") + # Apply source change + source_metadata = { + "start_line": start_line + 1, + "end_line": end_line + 1, + "start_pattern": start_pattern, + "end_pattern": end_pattern, + "line_count": line_count, + 
"near_context": near_context, + "occurrence": occurrence, + "extracted_content": extracted_block, + "target_file": rel_target_path, + } + source_change_id = apply_change( + coder, + abs_source_path, + rel_source_path, + source_content, + new_source_content, + "extractlines_source", + source_metadata, + ) - try: - target_metadata = { - "insertion_line": target_insertion_line + 1, - "inserted_content": extracted_block, - "source_file": rel_source_path, - } - target_change_id = coder.change_tracker.track_change( - file_path=rel_target_path, - change_type="extractlines_target", - original_content=original_target_content, - new_content=new_target_content, - metadata=target_metadata, - ) - except Exception as track_e: - coder.io.tool_error(f"Error tracking target change for ExtractLines: {track_e}") + # Apply target change + target_metadata = { + "insertion_line": target_insertion_line + 1, + "inserted_content": extracted_block, + "source_file": rel_source_path, + } + target_change_id = apply_change( + coder, + abs_target_path, + rel_target_path, + target_content, + new_target_content, + "extractlines_target", + target_metadata, + ) # --- Update Context --- coder.files_edited_by_tools.add(rel_source_path) @@ -312,6 +273,9 @@ def execute( f" (Insertion):\n{target_diff_snippet}" ) + except ToolError as e: + # Handle errors raised by utility functions or explicitly raised here + return handle_tool_error(coder, tool_name, e, add_traceback=False) except Exception as e: - coder.io.tool_error(f"Error in ExtractLines: {str(e)}\n{traceback.format_exc()}") - return f"Error: {str(e)}" + # Handle unexpected errors + return handle_tool_error(coder, tool_name, e) diff --git a/aider/tools/make_editable.py b/aider/tools/make_editable.py deleted file mode 100644 index cdd51daa732..00000000000 --- a/aider/tools/make_editable.py +++ /dev/null @@ -1,69 +0,0 @@ -import os - -from aider.tools.utils.base_tool import BaseTool - - -class Tool(BaseTool): - NORM_NAME = "makeeditable" - SCHEMA = { 
- "type": "function", - "function": { - "name": "MakeEditable", - "description": "Make a read-only file editable.", - "parameters": { - "type": "object", - "properties": { - "file_path": { - "type": "string", - "description": "The path to the file to make editable.", - }, - }, - "required": ["file_path"], - }, - }, - } - - # Keep the underscore prefix as this function is primarily for internal coder use - @classmethod - def execute(cls, coder, file_path): - """ - Convert a read-only file to an editable file. - - This allows the LLM to upgrade a file from read-only to editable - when it determines it needs to make changes to that file. - """ - try: - # Get absolute path - abs_path = coder.abs_root_path(file_path) - - # Check if file is already editable - if abs_path in coder.abs_fnames: - coder.io.tool_output(f"πŸ“ File '{file_path}' is already editable") - return "File is already editable" - - # Check if file exists on disk - if not os.path.isfile(abs_path): - coder.io.tool_output(f"⚠️ File '{file_path}' not found") - return "Error: File not found" - - # File exists, is not editable, might be read-only or not in context yet - was_read_only = False - if abs_path in coder.abs_read_only_fnames: - coder.abs_read_only_fnames.remove(abs_path) - was_read_only = True - - # Add to editable files - coder.abs_fnames.add(abs_path) - - if was_read_only: - coder.io.tool_output(f"πŸ“ Moved '{file_path}' from read-only to editable") - return "File is now editable (moved from read-only)" - else: - # File was not previously in context at all - coder.io.tool_output(f"πŸ“ Added '{file_path}' directly to editable context") - # Track if added during exploration? Maybe not needed for direct MakeEditable. 
- # coder.files_added_in_exploration.add(rel_path) # Consider if needed - return "File is now editable (added directly)" - except Exception as e: - coder.io.tool_error(f"Error in MakeEditable for '{file_path}': {str(e)}") - return f"Error: {str(e)}" diff --git a/aider/tools/make_readonly.py b/aider/tools/make_readonly.py deleted file mode 100644 index 33095b02047..00000000000 --- a/aider/tools/make_readonly.py +++ /dev/null @@ -1,53 +0,0 @@ -from aider.tools.utils.base_tool import BaseTool - - -class Tool(BaseTool): - NORM_NAME = "makereadonly" - SCHEMA = { - "type": "function", - "function": { - "name": "MakeReadonly", - "description": "Make an editable file read-only.", - "parameters": { - "type": "object", - "properties": { - "file_path": { - "type": "string", - "description": "The path to the file to make read-only.", - }, - }, - "required": ["file_path"], - }, - }, - } - - @classmethod - def execute(cls, coder, file_path): - """ - Convert an editable file to a read-only file. - - This allows the LLM to downgrade a file from editable to read-only - when it determines it no longer needs to make changes to that file. 
- """ - try: - # Get absolute path - abs_path = coder.abs_root_path(file_path) - - # Check if file is in editable context - if abs_path not in coder.abs_fnames: - if abs_path in coder.abs_read_only_fnames: - coder.io.tool_output(f"πŸ“š File '{file_path}' is already read-only") - return "File is already read-only" - else: - coder.io.tool_output(f"⚠️ File '{file_path}' not in context") - return "File not in context" - - # Move from editable to read-only - coder.abs_fnames.remove(abs_path) - coder.abs_read_only_fnames.add(abs_path) - - coder.io.tool_output(f"πŸ“š Made '{file_path}' read-only") - return "File is now read-only" - except Exception as e: - coder.io.tool_error(f"Error making file read-only: {str(e)}") - return f"Error: {str(e)}" diff --git a/aider/tools/remove.py b/aider/tools/remove.py deleted file mode 100644 index b8a861447cc..00000000000 --- a/aider/tools/remove.py +++ /dev/null @@ -1,75 +0,0 @@ -import time - -from aider.tools.utils.base_tool import BaseTool - - -class Tool(BaseTool): - NORM_NAME = "remove" - SCHEMA = { - "type": "function", - "function": { - "name": "Remove", - "description": ( - "Remove a file from the chat context. Should be used proactively to keep con" - "Should be used after editing a file when all edits are done " - "and the file is no longer necessary in context." - ), - "parameters": { - "type": "object", - "properties": { - "file_path": { - "type": "string", - "description": "The path to the file to remove.", - }, - }, - "required": ["file_path"], - }, - }, - } - - @classmethod - def execute(cls, coder, file_path): - """ - Explicitly remove a file from context. - - This allows the LLM to clean up its context when files are no - longer needed, keeping the context focused and efficient. 
- """ - try: - # Get absolute path - abs_path = coder.abs_root_path(file_path) - rel_path = coder.get_rel_fname(abs_path) - - # Check if file is in context (either editable or read-only) - removed = False - if abs_path in coder.abs_fnames: - # Don't remove if it's the last editable file and there are no read-only files - if len(coder.abs_fnames) <= 1 and not coder.abs_read_only_fnames: - coder.io.tool_output( - f"⚠️ Cannot remove '{file_path}' - it's the only file in context" - ) - return "Cannot remove - last file in context" - coder.abs_fnames.remove(abs_path) - removed = True - elif abs_path in coder.abs_read_only_fnames: - # Don't remove if it's the last read-only file and there are no editable files - if len(coder.abs_read_only_fnames) <= 1 and not coder.abs_fnames: - coder.io.tool_output( - f"⚠️ Cannot remove '{file_path}' - it's the only file in context" - ) - return "Cannot remove - last file in context" - coder.abs_read_only_fnames.remove(abs_path) - removed = True - - if not removed: - coder.io.tool_output(f"⚠️ File '{file_path}' not in context") - return "File not in context" - - # Track in recently removed - coder.recently_removed[rel_path] = {"removed_at": time.time()} - - coder.io.tool_output(f"πŸ—‘οΈ Explicitly removed '{file_path}' from context") - return "Removed file from context" - except Exception as e: - coder.io.tool_error(f"Error removing file: {str(e)}") - return f"Error: {str(e)}" diff --git a/aider/tools/utils/helpers.py b/aider/tools/utils/helpers.py index a0fbb871118..aa187a28acc 100644 --- a/aider/tools/utils/helpers.py +++ b/aider/tools/utils/helpers.py @@ -52,10 +52,14 @@ def validate_file_for_edit(coder, file_path): if abs_path not in coder.abs_fnames: if abs_path in coder.abs_read_only_fnames: - raise ToolError(f"File '{file_path}' is read-only. Use MakeEditable first.") + raise ToolError( + f"File '{file_path}' is read-only. Make editable with `ContextManager` first." 
+ ) else: # File exists but is not in context at all - raise ToolError(f"File '{file_path}' not in context. Use View or MakeEditable first.") + raise ToolError( + f"File '{file_path}' not in context. Make editable with `ContextManager` first." + ) # Reread content immediately before potential modification content = coder.io.read_text(abs_path) diff --git a/aider/tools/view.py b/aider/tools/view.py deleted file mode 100644 index 46b8db0c99c..00000000000 --- a/aider/tools/view.py +++ /dev/null @@ -1,41 +0,0 @@ -from aider.tools.utils.base_tool import BaseTool - - -class Tool(BaseTool): - NORM_NAME = "view" - SCHEMA = { - "type": "function", - "function": { - "name": "View", - "description": ( - "View a specific file and add it to context." - "Only use this when the file is not already in the context " - "and when editing the file is necessary to accomplish the goal." - ), - "parameters": { - "type": "object", - "properties": { - "file_path": { - "type": "string", - "description": "The path to the file to view.", - }, - }, - "required": ["file_path"], - }, - }, - } - - @classmethod - def execute(cls, coder, file_path): - """ - Explicitly add a file to context as read-only. - - This gives the LLM explicit control over what files to view, - rather than relying on indirect mentions. 
- """ - try: - # Use the coder's helper, marking it as an explicit view request - return coder._add_file_to_context(file_path, explicit=True) - except Exception as e: - coder.io.tool_error(f"Error viewing file: {str(e)}") - return f"Error: {str(e)}" From e3645bb4d6017097174ff56f79bfba18b3ad1b8c Mon Sep 17 00:00:00 2001 From: Gopar Date: Wed, 31 Dec 2025 13:00:33 -0800 Subject: [PATCH 06/11] Standardize where to save command files --- aider/commands/load.py | 27 ++++++--- aider/commands/save.py | 43 ++++----------- aider/commands/utils/helpers.py | 4 +- aider/save_load_manager.py | 98 +++++++++++++++++++++++++++++++++ 4 files changed, 131 insertions(+), 41 deletions(-) create mode 100644 aider/save_load_manager.py diff --git a/aider/commands/load.py b/aider/commands/load.py index 00e4bc547b2..ae22ad94835 100644 --- a/aider/commands/load.py +++ b/aider/commands/load.py @@ -2,6 +2,7 @@ from aider.commands.utils.base_command import BaseCommand from aider.commands.utils.helpers import format_command_result +from aider.save_load_manager import SaveLoadManager class LoadCommand(BaseCommand): @@ -15,12 +16,13 @@ async def execute(cls, io, coder, args, **kwargs): io.tool_error("Please provide a filename containing commands to load.") return format_command_result(io, "load", "No filename provided") + manager = SaveLoadManager(coder, io) + try: - with open(args.strip(), "r", encoding=io.encoding, errors="replace") as f: - commands = f.readlines() - except FileNotFoundError: - io.tool_error(f"File not found: {args}") - return format_command_result(io, "load", f"File not found: {args}") + commands = manager.load_commands(args.strip()) + except FileNotFoundError as e: + io.tool_error(str(e)) + return format_command_result(io, "load", str(e)) except Exception as e: io.tool_error(f"Error reading file: {e}") return format_command_result(io, "load", f"Error reading file: {e}") @@ -34,6 +36,7 @@ async def execute(cls, io, coder, args, **kwargs): commands_instance = Commands(io, 
coder) + should_raise_at_end = None for cmd in commands: cmd = cmd.strip() if not cmd or cmd.startswith("#"): @@ -45,13 +48,18 @@ async def execute(cls, io, coder, args, **kwargs): except Exception as e: # Handle SwitchCoder exception specifically if type(e).__name__ == "SwitchCoder": - io.tool_error( - f"Command '{cmd}' is only supported in interactive mode, skipping." - ) + # SwitchCoder is raised when switching between coder types (e.g., /architect, /ask). + # This is expected behavior, not an error. But this gets in the way when running `/load` so we + # ignore it and continue processing remaining commands. + should_raise_at_end = e + continue else: # Re-raise other exceptions raise + if should_raise_at_end: + raise should_raise_at_end + return format_command_result( io, "load", f"Loaded and executed commands from {args.strip()}" ) @@ -59,7 +67,8 @@ async def execute(cls, io, coder, args, **kwargs): @classmethod def get_completions(cls, io, coder, args) -> List[str]: """Get completion options for load command.""" - return [] + manager = SaveLoadManager(coder, io) + return manager.list_files() @classmethod def get_help(cls) -> str: diff --git a/aider/commands/save.py b/aider/commands/save.py index 9b4834c92ab..4828eb892fc 100644 --- a/aider/commands/save.py +++ b/aider/commands/save.py @@ -1,8 +1,8 @@ -from pathlib import Path from typing import List from aider.commands.utils.base_command import BaseCommand from aider.commands.utils.helpers import format_command_result +from aider.save_load_manager import SaveLoadManager class SaveCommand(BaseCommand): @@ -13,38 +13,16 @@ class SaveCommand(BaseCommand): async def execute(cls, io, coder, args, **kwargs): """Execute the save command with given parameters.""" if not args.strip(): - io.tool_error("Please provide a filename to save the commands to.") - return format_command_result(io, "save", "No filename provided") + return format_command_result( + io, "save", "", "No filename provided to save the commands to" + ) - 
try: - with open(args.strip(), "w", encoding=io.encoding) as f: - f.write("/drop\n") - # Write commands to add editable files - for fname in sorted(coder.abs_fnames): - rel_fname = coder.get_rel_fname(fname) - f.write(f"/add {rel_fname}\n") - - # Write commands to add read-only files - for fname in sorted(coder.abs_read_only_fnames): - # Use absolute path for files outside repo root, relative path for files inside - if Path(fname).is_relative_to(coder.root): - rel_fname = coder.get_rel_fname(fname) - f.write(f"/read-only {rel_fname}\n") - else: - f.write(f"/read-only {fname}\n") - # Write commands to add read-only stubs files - for fname in sorted(coder.abs_read_only_stubs_fnames): - # Use absolute path for files outside repo root, relative path for files inside - if Path(fname).is_relative_to(coder.root): - rel_fname = coder.get_rel_fname(fname) - f.write(f"/read-only-stub {rel_fname}\n") - else: - f.write(f"/read-only-stub {fname}\n") + manager = SaveLoadManager(coder, io) - io.tool_output(f"Saved commands to {args.strip()}") - return format_command_result(io, "save", f"Saved commands to {args.strip()}") + try: + filepath = manager.save_commands(args.strip()) + return format_command_result(io, "save", f"Saved commands to {filepath}") except Exception as e: - io.tool_error(f"Error saving commands to file: {e}") return format_command_result(io, "save", f"Error saving commands to file: {e}", e) @classmethod @@ -61,7 +39,10 @@ def get_help(cls) -> str: help_text += "\nUsage:\n" help_text += " /save # Save commands to reconstruct current chat session\n" help_text += "\nExamples:\n" - help_text += " /save session.txt # Save session commands to session.txt\n" + help_text += " /save session # Save to .aider/saves/session.txt\n" + help_text += " /save session.txt # Save to .aider/saves/session.txt\n" + help_text += " /save ./session.txt # Save to ./session.txt (explicit path)\n" + help_text += " /save /tmp/session.txt # Save to /tmp/session.txt (absolute path)\n" 
help_text += "\nThe saved file contains commands that can be used with /load to restore\n" help_text += "the current chat session, including all editable and read-only files.\n" help_text += "The file starts with /drop to clear existing files, then adds all files.\n" diff --git a/aider/commands/utils/helpers.py b/aider/commands/utils/helpers.py index bb55e782ba9..665bf2492db 100644 --- a/aider/commands/utils/helpers.py +++ b/aider/commands/utils/helpers.py @@ -90,7 +90,9 @@ def validate_file_access(io, coder, file_path: str, require_in_chat: bool = Fals return True -def format_command_result(io, command_name: str, success_message: str, error: Exception = None): +def format_command_result( + io, command_name: str, success_message: str, error: Exception | str = None +): """ Format command execution result consistently. diff --git a/aider/save_load_manager.py b/aider/save_load_manager.py new file mode 100644 index 00000000000..f7ea7a4720b --- /dev/null +++ b/aider/save_load_manager.py @@ -0,0 +1,98 @@ +import os +from pathlib import Path +from typing import List + + +class SaveLoadManager: + """Manager for saving and loading command files.""" + + def __init__(self, coder, io): + self.coder = coder + self.io = io + + def get_saves_directory(self) -> Path: + """Get the saves directory, creating it if necessary.""" + saves_dir = Path(self.coder.abs_root_path(".aider/saves")) + os.makedirs(saves_dir, exist_ok=True) + return saves_dir + + def resolve_filepath(self, filename: str) -> Path: + """Resolve a filename to an absolute path, using saves directory if needed.""" + filepath = Path(filename) + + # If it's a simple filename (no directory separators), save to .aider/saves/ + if not filepath.is_absolute() and str(filepath) == filepath.name: + saves_dir = self.get_saves_directory() + filepath = saves_dir / filepath + + return filepath + + def save_commands(self, filename: str) -> Path: + """Save commands to reconstruct the current chat session to a file.""" + filepath = 
self.resolve_filepath(filename) + + try: + # Ensure parent directory exists + os.makedirs(filepath.parent, exist_ok=True) + + with open(filepath, "w", encoding=self.io.encoding) as f: + f.write("/drop\n") + # Write commands to add editable files + for fname in sorted(self.coder.abs_fnames): + rel_fname = self.coder.get_rel_fname(fname) + f.write(f"/add {rel_fname}\n") + + # Write commands to add read-only files + for fname in sorted(self.coder.abs_read_only_fnames): + # Use absolute path for files outside repo root, relative path for files inside + if Path(fname).is_relative_to(self.coder.root): + rel_fname = self.coder.get_rel_fname(fname) + f.write(f"/read-only {rel_fname}\n") + else: + f.write(f"/read-only {fname}\n") + # Write commands to add read-only stubs files + for fname in sorted(self.coder.abs_read_only_stubs_fnames): + # Use absolute path for files outside repo root, relative path for files inside + if Path(fname).is_relative_to(self.coder.root): + rel_fname = self.coder.get_rel_fname(fname) + f.write(f"/read-only-stub {rel_fname}\n") + else: + f.write(f"/read-only-stub {fname}\n") + + return filepath + except Exception as e: + raise IOError(f"Error saving commands to file: {e}") + + def load_commands(self, filename: str) -> List[str]: + """Load commands from a file.""" + filepath = self.resolve_filepath(filename) + + try: + with open(filepath, "r", encoding=self.io.encoding, errors="replace") as f: + commands = f.readlines() + return [ + cmd.strip() for cmd in commands if cmd.strip() and not cmd.strip().startswith("#") + ] + except FileNotFoundError: + raise FileNotFoundError(f"File not found: {filepath}") + except Exception as e: + raise IOError(f"Error reading file: {e}") + + def list_files(self) -> List[str]: + """Return a list of all filenames (without extensions) in the saves directory. 
+ + Returns: + List[str]: List of filenames without extensions, sorted alphabetically + """ + try: + saves_dir = self.get_saves_directory() + + if not saves_dir.exists(): + return [] + + # Get all files (not directories) in the saves directory + save_files = [f.name for f in saves_dir.iterdir() if f.is_file()] + return sorted(save_files) + except Exception: + # Return empty list on any error + return [] From 92f61c63bb0db5715f5d965f196c71439c8ba46b Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 17:43:48 -0500 Subject: [PATCH 07/11] #342: Gate scrapping behind yes_command_always, add arg to prevent scraping entirely --- aider/args.py | 6 ++++++ aider/coders/base_coder.py | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/aider/args.py b/aider/args.py index 1178e2cbadf..7d5a574e08a 100644 --- a/aider/args.py +++ b/aider/args.py @@ -856,6 +856,12 @@ def get_parser(default_config_files, git_root): help="Never prompt for or attempt to install Playwright for web scraping (default: False).", default=False, ) + group.add_argument( + "--disable-scraping", + action="store_true", + help="Disable automatic url scraping entirely web scraping (default: False).", + default=False, + ) group.add_argument( "--file", action="append", diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 6aad6b43bf0..ee858d26d0a 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1637,7 +1637,7 @@ async def check_and_open_urls(self, exc, friendly_msg=None): async def check_for_urls(self, inp: str) -> List[str]: """Check input for URLs and offer to add them to the chat.""" - if not self.detect_urls: + if not self.detect_urls or self.args.disable_scraping: return inp # Exclude double quotes from the matched URL characters @@ -1649,7 +1649,11 @@ async def check_for_urls(self, inp: str) -> List[str]: if url not in self.rejected_urls: url = url.rstrip(".',\"") if await self.io.confirm_ask( - "Add URL to the 
chat?", subject=url, group=group, allow_never=True + "Add URL to the chat?", + subject=url, + group=group, + allow_never=True, + explicit_yes_required=self.args.yes_always_commands, ): inp += "\n\n" inp += await self.commands.do_run("web", url, return_content=True) From 0b0c1e275d9560c1c2d5c784bcf56e38f97446b3 Mon Sep 17 00:00:00 2001 From: Gopar Date: Wed, 31 Dec 2025 15:03:37 -0800 Subject: [PATCH 08/11] Move save/load manager under utils module --- aider/commands/load.py | 2 +- aider/commands/save.py | 2 +- aider/{ => commands/utils}/save_load_manager.py | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename aider/{ => commands/utils}/save_load_manager.py (100%) diff --git a/aider/commands/load.py b/aider/commands/load.py index ae22ad94835..266fce9ca38 100644 --- a/aider/commands/load.py +++ b/aider/commands/load.py @@ -2,7 +2,7 @@ from aider.commands.utils.base_command import BaseCommand from aider.commands.utils.helpers import format_command_result -from aider.save_load_manager import SaveLoadManager +from aider.commands.utils.save_load_manager import SaveLoadManager class LoadCommand(BaseCommand): diff --git a/aider/commands/save.py b/aider/commands/save.py index 4828eb892fc..be04542d16a 100644 --- a/aider/commands/save.py +++ b/aider/commands/save.py @@ -2,7 +2,7 @@ from aider.commands.utils.base_command import BaseCommand from aider.commands.utils.helpers import format_command_result -from aider.save_load_manager import SaveLoadManager +from aider.commands.utils.save_load_manager import SaveLoadManager class SaveCommand(BaseCommand): diff --git a/aider/save_load_manager.py b/aider/commands/utils/save_load_manager.py similarity index 100% rename from aider/save_load_manager.py rename to aider/commands/utils/save_load_manager.py From be14599ec04b5bd3bafc91946555c5a3a672a345 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 18:11:43 -0500 Subject: [PATCH 09/11] Re-add weak-model command --- aider/commands/__init__.py | 3 + 
aider/commands/weak_model.py | 121 +++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) diff --git a/aider/commands/__init__.py b/aider/commands/__init__.py index 272a22eca4f..a6e27509556 100644 --- a/aider/commands/__init__.py +++ b/aider/commands/__init__.py @@ -71,6 +71,7 @@ ) from .utils.registry import CommandRegistry from .voice import VoiceCommand +from .weak_model import WeakModelCommand from .web import WebCommand # Register commands @@ -110,6 +111,7 @@ CommandRegistry.register(ReadOnlyStubCommand) CommandRegistry.register(AddCommand) CommandRegistry.register(ModelCommand) +CommandRegistry.register(WeakModelCommand) CommandRegistry.register(WebCommand) CommandRegistry.register(LintCommand) CommandRegistry.register(TestCommand) @@ -217,6 +219,7 @@ def __init__(self, *args, **kwargs): "ReadOnlyStubCommand", "AddCommand", "ModelCommand", + "WeakModelCommand", "WebCommand", "LintCommand", "TestCommand", diff --git a/aider/commands/weak_model.py b/aider/commands/weak_model.py index e69de29bb2d..cb7b5d37ef5 100644 --- a/aider/commands/weak_model.py +++ b/aider/commands/weak_model.py @@ -0,0 +1,121 @@ +from typing import List + +import aider.models as models +from aider.commands.utils.base_command import BaseCommand +from aider.commands.utils.helpers import format_command_result + + +class WeakModelCommand(BaseCommand): + NORM_NAME = "weak-model" + DESCRIPTION = "Switch the Weak Model to a new LLM" + + @classmethod + async def execute(cls, io, coder, args, **kwargs): + """Execute the weak_model command with given parameters.""" + arg_split = args.split(" ", 1) + model_name = arg_split[0].strip() + if not model_name: + # If no model name provided, show current weak model + current_weak_model = coder.main_model.weak_model.name + io.tool_output(f"Current weak model: {current_weak_model}") + return format_command_result( + io, "weak-model", f"Displayed current weak model: {current_weak_model}" + ) + + # Create a new model with the same main model 
and editor model, but updated weak model + model = models.Model( + coder.main_model.name, + editor_model=coder.main_model.editor_model.name, + weak_model=model_name, + io=io, + ) + await models.sanity_check_models(io, model) + + if len(arg_split) > 1: + # implement architect coder-like generation call for weak model + message = arg_split[1].strip() + + # Store the original model configuration + original_main_model = coder.main_model + original_edit_format = coder.edit_format + + # Create a temporary coder with the new model + from aider.coders import Coder + + kwargs = dict() + kwargs["main_model"] = model + kwargs["edit_format"] = coder.edit_format # Keep the same edit format + kwargs["suggest_shell_commands"] = False + kwargs["total_cost"] = coder.total_cost + kwargs["num_cache_warming_pings"] = 0 + kwargs["summarize_from_coder"] = False + + new_kwargs = dict(io=io, from_coder=coder) + new_kwargs.update(kwargs) + + temp_coder = await Coder.create(**new_kwargs) + temp_coder.cur_messages = [] + temp_coder.done_messages = [] + + verbose = kwargs.get("verbose", False) + if verbose: + temp_coder.show_announcements() + + try: + await temp_coder.generate(user_message=message, preproc=False) + coder.move_back_cur_messages( + f"Weak model {model_name} made those changes to the files." 
+ ) + coder.total_cost = temp_coder.total_cost + coder.aider_commit_hashes = temp_coder.aider_commit_hashes + + # Restore the original model configuration + from aider.commands import SwitchCoder + + raise SwitchCoder(main_model=original_main_model, edit_format=original_edit_format) + except Exception as e: + # If there's an error, still restore the original model + if not isinstance(e, SwitchCoder): + io.tool_error(str(e)) + raise SwitchCoder( + main_model=original_main_model, edit_format=original_edit_format + ) + else: + # Re-raise SwitchCoder if that's what was thrown + raise + else: + from aider.commands import SwitchCoder + + raise SwitchCoder(main_model=model, edit_format=coder.edit_format) + + @classmethod + def get_completions(cls, io, coder, args) -> List[str]: + """Get completion options for weak_model command.""" + return models.get_chat_model_names() + + @classmethod + def get_help(cls) -> str: + """Get help text for the weak_model command.""" + help_text = super().get_help() + help_text += "\nUsage:\n" + help_text += " /weak_model # Switch to a new weak model\n" + help_text += ( + " /weak_model # Use a specific weak model for a single" + " prompt\n" + ) + help_text += "\nExamples:\n" + help_text += ( + " /weak_model gpt-4o-mini # Switch to GPT-4o Mini as weak model\n" + ) + help_text += ( + " /weak_model claude-3-haiku # Switch to Claude 3 Haiku as weak model\n" + ) + help_text += ' /weak_model o1-mini "review this code" # Use o1-mini to review code\n' + help_text += ( + "\nWhen switching weak models, the main model and editor model remain unchanged.\n" + ) + help_text += ( + "\nIf you provide a prompt after the model name, that weak model will be used\n" + ) + help_text += "just for that prompt, then you'll return to your original weak model.\n" + return help_text From 257bcfe166db6f444c4087e5306e5b885b7630d2 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 22:58:02 -0500 Subject: [PATCH 10/11] profiler to token_profiler --- 
aider/coders/base_coder.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index c1da30d2ef5..9c21669a56b 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -376,7 +376,9 @@ def __init__( self.message_tokens_sent = 0 self.message_tokens_received = 0 - self.profiler = TokenProfiler(enable_printing=getattr(args, 'show_speed', False) if args else False) + self.token_profiler = TokenProfiler( + enable_printing=getattr(args, "show_speed", False) if args else False + ) self.verbose = verbose self.abs_fnames = set() self.abs_read_only_fnames = set() @@ -2991,7 +2993,7 @@ async def send(self, messages, model=None, functions=None, tools=None): self.partial_response_function_call = dict() completion = None - self.profiler.start() + self.token_profiler.start() try: hash_object, completion = await model.send_completion( @@ -3017,7 +3019,7 @@ async def send(self, messages, model=None, functions=None, tools=None): ex_info = LiteLLMExceptions().get_ex_info(err) if ex_info.name == "ContextWindowExceededError": # Still calculate costs for context window errors - self.profiler.on_error() + self.token_profiler.on_error() self.calculate_and_show_tokens_and_cost(messages, completion) raise except KeyboardInterrupt as kbi: @@ -3108,7 +3110,7 @@ async def show_send_output_stream(self, completion): try: if chunk.choices[0].delta.tool_calls: received_content = True - self.profiler.on_token() + self.token_profiler.on_token() for tool_call_chunk in chunk.choices[0].delta.tool_calls: self.tool_reflection = True @@ -3136,7 +3138,7 @@ async def show_send_output_stream(self, completion): self.io.update_spinner_suffix(v) received_content = True - self.profiler.on_token() + self.token_profiler.on_token() except AttributeError: pass @@ -3156,7 +3158,7 @@ async def show_send_output_stream(self, completion): text += reasoning_content self.got_reasoning_content = True 
received_content = True - self.profiler.on_token() + self.token_profiler.on_token() self.io.update_spinner_suffix(reasoning_content) self.partial_response_reasoning_content += reasoning_content @@ -3169,7 +3171,7 @@ async def show_send_output_stream(self, completion): text += content received_content = True - self.profiler.on_token() + self.token_profiler.on_token() self.io.update_spinner_suffix(content) except AttributeError: pass @@ -3409,7 +3411,9 @@ def calculate_and_show_tokens_and_cost(self, messages, completion=None): if cache_hit_tokens: tokens_report += f", {format_tokens(cache_hit_tokens)} cache hit" tokens_report += f", {format_tokens(self.message_tokens_received)} received." - tokens_report = self.profiler.add_to_usage_report(tokens_report, self.message_tokens_sent, self.message_tokens_received) + tokens_report = self.token_profiler.add_to_usage_report( + tokens_report, self.message_tokens_sent, self.message_tokens_received + ) if not self.main_model.info.get("input_cost_per_token"): self.usage_report = tokens_report From 2ca31060629439b1f6552eee852bcfafb227235a Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Wed, 31 Dec 2025 22:58:31 -0500 Subject: [PATCH 11/11] Formatting --- aider/helpers/profiler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aider/helpers/profiler.py b/aider/helpers/profiler.py index ddd62f8e574..7f332612fc5 100644 --- a/aider/helpers/profiler.py +++ b/aider/helpers/profiler.py @@ -134,7 +134,9 @@ def get_elapsed(self) -> Optional[float]: return time.time() - self._start_time - def add_to_usage_report(self, usage_report: Optional[str], input_tokens: int = 0, output_tokens: int = 0) -> str: + def add_to_usage_report( + self, usage_report: Optional[str], input_tokens: int = 0, output_tokens: int = 0 + ) -> str: """ Add speed report to usage_report and return the combined string. 
@@ -158,5 +160,3 @@ def add_to_usage_report(self, usage_report: Optional[str], input_tokens: int = 0 return usage_report + speed_report return usage_report - -