From 868fa5e167e6b99b1355f0bda7b61cea24482d2e Mon Sep 17 00:00:00 2001 From: 1Broseidon Date: Mon, 8 Dec 2025 17:01:21 -0600 Subject: [PATCH 01/28] feat: add OutputContainer with markdown rendering and cost tracking Co-authored-by: aider (openrouter/moonshotai/kimi-k2-thinking) --- aider/tui/widgets/output.py | 164 ++++++++++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 aider/tui/widgets/output.py diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py new file mode 100644 index 00000000000..6e89912c6b8 --- /dev/null +++ b/aider/tui/widgets/output.py @@ -0,0 +1,164 @@ +"""Output widgets for Aider TUI.""" + +import re +from io import StringIO + +from rich.console import Console +from rich.text import Text +from textual.message import Message +from textual.widgets import RichLog + + +class CostUpdate(Message): + """Message to update cost in footer.""" + + def __init__(self, cost: float): + self.cost = cost + super().__init__() + + +class OutputContainer(RichLog): + """Single scrollable output area.""" + + def __init__(self, **kwargs): + """Initialize output container.""" + super().__init__(auto_scroll=True, wrap=True, markup=True, **kwargs) + def start_task(self, task_id: str, title: str, task_type: str = "general"): + """Start a new task section (visual separator).""" + self._flush_buffer() + self.write(Text(f"\n─── {title} ───", style="dim")) + + def add_output(self, text: str, task_id: str = None): + """Add output text with intelligent whitespace handling.""" + if not text: + return + + # Clean up the text first + text = self._normalize_whitespace(text) + text = self._format_markdown(text) + text = self._format_code_blocks(text) + text = self._format_message_type(text) + + # Buffer for complete lines + self._stream_buffer += text + + # Only flush complete lines (RichLog.write adds newline after each call) + while '\n' in self._stream_buffer: + line, self._stream_buffer = self._stream_buffer.split('\n', 1) + 
self._write_line(line) +class CostUpdate(Message): + """Message to update cost in footer.""" + + def __init__(self, cost: float): + self.cost = cost + super().__init__() + + +class OutputContainer(RichLog): + """Single scrollable output area.""" + + def __init__(self, **kwargs): + """Initialize output container.""" + super().__init__(auto_scroll=True, wrap=True, markup=True, **kwargs) + self._stream_buffer = "" + + def start_task(self, task_id: str, title: str, task_type: str = "general"): + """Start a new task section (visual separator).""" + self._flush_buffer() + self.write(Text(f"\n─── {title} ───", style="dim")) + + def add_output(self, text: str, task_id: str = None): + """Add output text, buffering for complete lines.""" + if not text: + return + + self._stream_buffer += text + + # Only flush complete lines (RichLog.write adds newline after each call) + while '\n' in self._stream_buffer: + line, self._stream_buffer = self._stream_buffer.split('\n', 1) + self._write_line(line) + + def _flush_buffer(self): + """Flush any remaining buffered content.""" + if self._stream_buffer: + self._write_line(self._stream_buffer) + self._stream_buffer = "" + + def _write_line(self, text: str): + """Write a single line to the display.""" + if not text: + return + + # Check for cost information and emit update + self._check_for_cost(text) + + # Strip Rich markup tags like [blue], [/], [bold], etc. 
+ text = self._strip_rich_markup(text) + + # Handle ANSI codes first + if '\x1b' in text: + self.write(Text.from_ansi(text)) + return + + # Try to render as markdown for formatting + if self._has_markdown(text): + try: + from rich.markdown import Markdown + width = max(self.size.width - 2, 40) + console = Console(file=StringIO(), force_terminal=True, width=width) + md = Markdown(text) + with console.capture() as capture: + console.print(md, end="") + rendered = capture.get().rstrip('\n') + if rendered: + self.write(Text.from_ansi(rendered)) + return + except Exception: + pass + + # Fallback to plain text + self.write(Text(text)) + + def _has_markdown(self, text: str) -> bool: + """Check if text has markdown formatting.""" + # Look for common markdown patterns + patterns = ['**', '__', '`', '```', '##', '- ', '* ', '1. '] + return any(p in text for p in patterns) + + def _check_for_cost(self, text: str): + """Check for cost info and post message to update footer.""" + # Look for pattern like "$0.0086 session" or "$X.XX session" + match = re.search(r'\$(\d+\.?\d*)\s*session', text) + if match: + try: + cost = float(match.group(1)) + self.post_message(CostUpdate(cost)) + except (ValueError, AttributeError): + pass + + def _strip_rich_markup(self, text: str) -> str: + """Remove Rich console markup tags from text.""" + # Pattern matches [tagname] and [/tagname] and [/] + # Common tags: [blue], [bold], [red], [green], [/], [/blue], etc. 
+ pattern = r'\[/?(?:blue|red|green|yellow|bold|dim|italic|underline|strike|reverse|blink|/)*\]' + return re.sub(pattern, '', text) + + def add_markdown(self, text: str): + """Add markdown content (renders via Rich).""" + self._flush_buffer() + try: + console = Console(file=StringIO(), force_terminal=True, width=self.size.width - 4) + from rich.markdown import Markdown + md = Markdown(text) + with console.capture() as capture: + console.print(md) + rendered = capture.get() + self.write(Text.from_ansi(rendered)) + except Exception: + self.write(Text(text)) + + def clear_output(self): + """Clear all output.""" + self._stream_buffer = "" + self.clear() \ No newline at end of file From a595267267570a0a8dc24278269d8f24e0de7c36 Mon Sep 17 00:00:00 2001 From: 1Broseidon Date: Tue, 9 Dec 2025 13:37:32 -0600 Subject: [PATCH 02/28] feat: add experimental tui --- aider/args.py | 6 + aider/commands.py | 7 + aider/main.py | 61 +++- aider/tui/__init__.py | 80 ++++ aider/tui/app.py | 546 ++++++++++++++++++++++++++++ aider/tui/io.py | 368 +++++++++++++++++++ aider/tui/styles.tcss | 61 ++++ aider/tui/widgets/__init__.py | 15 + aider/tui/widgets/completion_bar.py | 287 +++++++++++++++ aider/tui/widgets/footer.py | 152 ++++++++ aider/tui/widgets/input_area.py | 181 +++++++++ aider/tui/widgets/output.py | 248 ++++++------- aider/tui/widgets/status_bar.py | 228 ++++++++++++ aider/tui/worker.py | 154 ++++++++ pyproject.toml | 1 + requirements/requirements-tui.in | 1 + 16 files changed, 2266 insertions(+), 130 deletions(-) create mode 100644 aider/tui/__init__.py create mode 100644 aider/tui/app.py create mode 100644 aider/tui/io.py create mode 100644 aider/tui/styles.tcss create mode 100644 aider/tui/widgets/__init__.py create mode 100644 aider/tui/widgets/completion_bar.py create mode 100644 aider/tui/widgets/footer.py create mode 100644 aider/tui/widgets/input_area.py create mode 100644 aider/tui/widgets/status_bar.py create mode 100644 aider/tui/worker.py create mode 100644 
requirements/requirements-tui.in diff --git a/aider/args.py b/aider/args.py index 51b8480bd43..c03baae84be 100644 --- a/aider/args.py +++ b/aider/args.py @@ -629,6 +629,12 @@ def get_parser(default_config_files, git_root): default=False, help="Perform a dry run without modifying files (default: False)", ) + group.add_argument( + "--tui", + action="store_true", + default=False, + help="Launch Textual TUI interface (experimental)", + ) group.add_argument( "--skip-sanity-check-repo", action="store_true", diff --git a/aider/commands.py b/aider/commands.py index 7162be45d08..bd6aa8e658e 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -1314,6 +1314,13 @@ async def cmd_exit(self, args): await asyncio.sleep(0) + # Check if running in TUI mode - use graceful exit to restore terminal + if hasattr(self.io, 'request_exit'): + self.io.request_exit() + # Give TUI time to process the exit message + await asyncio.sleep(0.5) + return + try: if self.coder.args.linear_output: os._exit(0) diff --git a/aider/main.py b/aider/main.py index 0ac1bbd54ff..cf10bc88639 100644 --- a/aider/main.py +++ b/aider/main.py @@ -89,6 +89,23 @@ def guessed_wrong_repo(io, git_root, fnames, git_dname): return str(check_repo) +def validate_tui_args(args): + """Validate that incompatible flags aren't used with --tui""" + if not args.tui: + return + + incompatible = [] + if args.vim: + incompatible.append("--vim") + if not args.fancy_input: + incompatible.append("--no-fancy-input") + + if incompatible: + print(f"Error: --tui is incompatible with: {', '.join(incompatible)}") + print("Remove these flags or use standard CLI mode.") + sys.exit(1) + + async def make_new_repo(git_root, io): try: repo = git.Repo.init(git_root) @@ -586,14 +603,35 @@ def get_io(pretty): verbose=args.verbose, ) - io = get_io(args.pretty) - try: - io.rule() - except UnicodeEncodeError as err: - if not io.pretty: - raise err - io = get_io(False) - io.tool_warning("Terminal does not support pretty output (UnicodeDecodeError)") 
+ # Validate TUI arguments + validate_tui_args(args) + + # TUI mode - create TUI-specific IO + output_queue = None + input_queue = None + if args.tui: + try: + from aider.tui import create_tui_io + args.linear_output = True + print("Starting aider TUI...", flush=True) + io, output_queue, input_queue = create_tui_io(args, editing_mode) + except ImportError as e: + print("Error: --tui requires 'textual' package") + print("Install with: pip install aider-ce[tui]") + print(f"Import error: {e}") + sys.exit(1) + else: + io = get_io(args.pretty) + + # Only do CLI-specific initialization if not in TUI mode + if not args.tui: + try: + io.rule() + except UnicodeEncodeError as err: + if not io.pretty: + raise err + io = get_io(False) + io.tool_warning("Terminal does not support pretty output (UnicodeDecodeError)") # Process any environment variables set via --set-env if args.set_env: @@ -1148,6 +1186,13 @@ def get_io(pretty): # Don't show errors for auto-load to avoid interrupting the user experience pass + # TUI mode - launch Textual interface + if args.tui: + from aider.tui import launch_tui + return_code = await launch_tui(coder, output_queue, input_queue) + return await graceful_exit(coder, return_code) + + # Standard CLI mode - main loop while True: try: coder.ok_to_warm_cache = bool(args.cache_keepalive_pings) diff --git a/aider/tui/__init__.py b/aider/tui/__init__.py new file mode 100644 index 00000000000..286c775909c --- /dev/null +++ b/aider/tui/__init__.py @@ -0,0 +1,80 @@ +"""Textual TUI interface for Aider. + +This package provides an experimental TUI (Terminal User Interface) for aider +using the Textual framework. Launch with: aider-ce --tui +""" + +import queue + +from .app import AiderApp +from .io import TextualInputOutput +from .worker import CoderWorker + +__all__ = ["AiderApp", "TextualInputOutput", "CoderWorker", "create_tui_io", "launch_tui"] + + +def create_tui_io(args, editing_mode): + """Create TUI IO instance and communication queues. 
+ + Args: + args: Parsed command line arguments + editing_mode: EditingMode.VI or EditingMode.EMACS + + Returns: + Tuple of (io, output_queue, input_queue) + """ + output_queue = queue.Queue() + input_queue = queue.Queue() + + io = TextualInputOutput( + output_queue=output_queue, + input_queue=input_queue, + pretty=True, + yes=args.yes_always, + input_history_file=args.input_history_file, + chat_history_file=args.chat_history_file, + input=None, + output=None, + user_input_color=args.user_input_color, + tool_output_color=args.tool_output_color, + tool_warning_color=args.tool_warning_color, + tool_error_color=args.tool_error_color, + completion_menu_color=args.completion_menu_color, + completion_menu_bg_color=args.completion_menu_bg_color, + completion_menu_current_color=args.completion_menu_current_color, + completion_menu_current_bg_color=args.completion_menu_current_bg_color, + assistant_output_color=args.assistant_output_color, + code_theme=args.code_theme, + dry_run=args.dry_run, + encoding=args.encoding, + line_endings=args.line_endings, + llm_history_file=args.llm_history_file, + editingmode=editing_mode, + fancy_input=False, + multiline_mode=args.multiline, + notifications=args.notifications, + notifications_command=args.notifications_command, + verbose=args.verbose, + ) + + return io, output_queue, input_queue + + +async def launch_tui(coder, output_queue, input_queue): + """Launch the TUI application. 
+ + Args: + coder: Initialized Coder instance + output_queue: Queue for output messages + input_queue: Queue for input messages + + Returns: + Exit code from TUI + """ + worker = CoderWorker(coder, output_queue, input_queue) + app = AiderApp(worker, output_queue, input_queue) + + return_code = await app.run_async() + + worker.stop() + return return_code if return_code else 0 diff --git a/aider/tui/app.py b/aider/tui/app.py new file mode 100644 index 00000000000..b4470839445 --- /dev/null +++ b/aider/tui/app.py @@ -0,0 +1,546 @@ +"""Main Textual application for Aider TUI.""" + +import queue + +from textual.app import App, ComposeResult +from textual.binding import Binding +from textual.theme import Theme + +from .widgets import ( + AiderFooter, + CompletionBar, + InputArea, + OutputContainer, + StatusBar, +) +from .widgets.output import CostUpdate + +# Aider theme - dark with green accent +AIDER_THEME = Theme( + name="aider", + primary="#00aa00", # Aider green + secondary="#888888", + accent="#00aa00", + foreground="#ffffff", + background="#0d0d0d", # Near black + success="#00aa00", + warning="#ffaa00", + error="#ff3333", + surface="#1a1a1a", # Slightly lighter than background + panel="#262626", + dark=True, +) + + +class AiderApp(App): + """Main Textual application for Aider TUI.""" + + CSS_PATH = "styles.tcss" + + BINDINGS = [ + Binding("ctrl+c", "quit", "Quit", show=True), + Binding("ctrl+l", "clear_output", "Clear", show=True), + ] + + def __init__(self, coder_worker, output_queue, input_queue): + """Initialize the Aider TUI app.""" + super().__init__() + self.worker = coder_worker + self.output_queue = output_queue + self.input_queue = input_queue + # Cache for code symbols (functions, classes, variables) + self._symbols_cache = None + self._symbols_files_hash = None + + # Register and set aider theme + self.register_theme(AIDER_THEME) + self.theme = "aider" + + def compose(self) -> ComposeResult: + """Create child widgets.""" + coder = self.worker.coder + 
model_name = coder.main_model.name if coder.main_model else "Unknown" + aider_mode = getattr(coder, 'edit_format', 'code') or 'code' + + # Get project name (just the folder name, not full path) + project_name = "" + if coder.repo: + project_name = coder.repo.root.name if hasattr(coder.repo.root, 'name') else str(coder.repo.root).split('/')[-1] + else: + project_name = "No repo" + + # Get history file path from coder's io + history_file = getattr(coder.io, 'input_history_file', None) + + # Simple vertical layout - no header, footer has all info + # Git info loaded in on_mount to avoid blocking startup + yield OutputContainer(id="output") + yield StatusBar(id="status-bar") + yield InputArea(history_file=history_file, id="input") + yield AiderFooter( + model_name=model_name, + project_name=project_name, + git_branch="", # Loaded async in on_mount + aider_mode=aider_mode, + id="footer" + ) + + def on_mount(self): + """Called when app starts.""" + self.set_interval(0.05, self.check_output_queue) + self.worker.start() + self.query_one("#input").focus() + + # Load git info in background to avoid blocking startup + self.call_later(self._load_git_info) + + def _load_git_info(self): + """Load git branch and dirty count (deferred to avoid blocking startup).""" + footer = self.query_one(AiderFooter) + if self.worker.coder.repo: + try: + branch = self.worker.coder.repo.get_head_branch_name() or "main" + dirty = self.worker.coder.repo.get_dirty_files() + footer.update_git(branch, len(dirty) if dirty else 0) + except Exception: + footer.update_git("main", 0) + + def check_output_queue(self): + """Process messages from coder worker.""" + try: + while True: + msg = self.output_queue.get_nowait() + self.handle_output_message(msg) + except queue.Empty: + pass + + def handle_output_message(self, msg): + """Route output messages to appropriate handlers.""" + msg_type = msg['type'] + + if msg_type == 'output': + self.add_output(msg['text'], msg.get('task_id')) + elif msg_type == 
'start_response': + # Start a new LLM response with streaming + self.run_worker(self._start_response()) + elif msg_type == 'stream_chunk': + # Stream a chunk of LLM response + self.run_worker(self._stream_chunk(msg['text'])) + elif msg_type == 'end_response': + # End the current LLM response + self.run_worker(self._end_response()) + elif msg_type == 'start_task': + self.start_task(msg['task_id'], msg['title'], msg.get('task_type')) + elif msg_type == 'confirmation': + self.show_confirmation(msg) + elif msg_type == 'spinner': + self.update_spinner(msg) + elif msg_type == 'ready_for_input': + self.enable_input(msg) + footer = self.query_one(AiderFooter) + footer.stop_spinner() + elif msg_type == 'error': + self.show_error(msg['message']) + elif msg_type == 'cost_update': + footer = self.query_one(AiderFooter) + footer.update_cost(msg.get('cost', 0)) + elif msg_type == 'exit': + # Graceful exit requested - let Textual clean up terminal properly + self.action_quit() + elif msg_type == 'mode_change': + # Update footer with new chat mode + footer = self.query_one(AiderFooter) + footer.update_mode(msg.get('mode', 'code')) + + def add_output(self, text, task_id=None): + """Add output to the output container.""" + output_container = self.query_one("#output", OutputContainer) + output_container.add_output(text, task_id) + + async def _start_response(self): + """Start a new LLM response (async helper).""" + output_container = self.query_one("#output", OutputContainer) + await output_container.start_response() + + async def _stream_chunk(self, text: str): + """Stream a chunk to the current response (async helper).""" + output_container = self.query_one("#output", OutputContainer) + await output_container.stream_chunk(text) + + async def _end_response(self): + """End the current LLM response (async helper).""" + output_container = self.query_one("#output", OutputContainer) + await output_container.end_response() + + def add_user_message(self, text: str): + """Add a user message 
to output.""" + output_container = self.query_one("#output", OutputContainer) + output_container.add_user_message(text) + + def start_task(self, task_id, title, task_type="general"): + """Start a new task section.""" + output_container = self.query_one("#output", OutputContainer) + output_container.start_task(task_id, title, task_type) + + def show_confirmation(self, msg): + """Show inline confirmation bar.""" + # Disable input while confirm bar is active + input_area = self.query_one("#input", InputArea) + input_area.disabled = True + + # Show confirmation in status bar + status_bar = self.query_one("#status-bar", StatusBar) + status_bar.show_confirm(msg['question'], show_all=True) + + def update_spinner(self, msg): + """Update spinner in footer.""" + footer = self.query_one(AiderFooter) + action = msg.get('action', 'start') + + if action == 'start': + footer.start_spinner(msg.get('text', '')) + elif action == 'update': + footer.spinner_text = msg.get('text', '') + elif action == 'stop': + footer.stop_spinner() + + def enable_input(self, msg): + """Enable input and update autocomplete data.""" + input_area = self.query_one("#input", InputArea) + input_area.disabled = False # Ensure input is enabled + files = msg.get('files', []) + commands = msg.get('commands', []) + input_area.update_autocomplete_data(files, commands) + input_area.focus() + + def show_error(self, message): + """Show error notification.""" + status_bar = self.query_one("#status-bar", StatusBar) + status_bar.show_notification(f"Error: {message}", severity="error", timeout=10) + + def on_input_submitted(self, event): + """Handle input submission.""" + user_input = event.value + + if not user_input.strip(): + return + + # Save to history before clearing + input_area = self.query_one("#input", InputArea) + input_area.save_to_history(user_input) + + event.input.value = "" + + # Show user's message in output + self.add_user_message(user_input) + + # Update footer to show processing + footer = 
self.query_one(AiderFooter) + footer.start_spinner("Thinking...") + + self.input_queue.put({'text': user_input}) + + def action_clear_output(self): + """Clear all output.""" + output_container = self.query_one("#output", OutputContainer) + output_container.clear_output() + + def action_quit(self): + """Quit the application.""" + # Prevent multiple quit attempts + if hasattr(self, '_quitting') and self._quitting: + return + self._quitting = True + + # Show shutdown message + status_bar = self.query_one("#status-bar", StatusBar) + status_bar.show_notification("Shutting down...", severity="warning", timeout=None) + + # Delay exit to allow status bar to render + self.set_timer(0.3, self._do_quit) + + def _do_quit(self): + """Perform the actual quit after UI updates.""" + self.worker.stop() + self.exit() + + def on_cost_update(self, message: CostUpdate): + """Handle cost update from output.""" + footer = self.query_one(AiderFooter) + footer.cost = message.cost + footer.refresh() + + def on_status_bar_confirm_response(self, message: StatusBar.ConfirmResponse): + """Handle confirmation response from status bar.""" + # Re-enable input + input_area = self.query_one("#input", InputArea) + input_area.disabled = False + input_area.focus() + + self.input_queue.put({'confirmed': message.result}) + + # Commands that use path-based completion + PATH_COMPLETION_COMMANDS = {"/read-only", "/read-only-stub", "/load", "/save"} + + def _extract_symbols(self) -> set[str]: + """Extract code symbols from files in chat using Pygments.""" + coder = self.worker.coder + + # Get current files in chat + inchat_files = [] + if hasattr(coder, 'abs_fnames'): + inchat_files.extend(coder.abs_fnames) + if hasattr(coder, 'abs_read_only_fnames'): + inchat_files.extend(coder.abs_read_only_fnames) + + # Check if cache is still valid + files_hash = hash(tuple(sorted(inchat_files))) + if self._symbols_cache is not None and self._symbols_files_hash == files_hash: + return self._symbols_cache + + symbols = 
set() + + # Also add filenames as completable symbols + if hasattr(coder, 'get_inchat_relative_files'): + symbols.update(coder.get_inchat_relative_files()) + if hasattr(coder, 'get_all_relative_files'): + # Add all project files too + symbols.update(coder.get_all_relative_files()) + + # Limit files to tokenize for performance + files_to_process = inchat_files[:30] + + try: + from pygments.lexers import guess_lexer_for_filename + from pygments.token import Token + except ImportError: + # Pygments not available, just return filenames + self._symbols_cache = symbols + self._symbols_files_hash = files_hash + return symbols + + for fname in files_to_process: + try: + with open(fname, 'r', encoding='utf-8', errors='ignore') as f: + content = f.read() + + lexer = guess_lexer_for_filename(fname, content) + tokens = lexer.get_tokens(content) + + for token_type, token_value in tokens: + # Extract identifiers (function names, class names, variables) + if token_type in Token.Name and len(token_value) > 1: + symbols.add(token_value) + except Exception: + continue + + self._symbols_cache = symbols + self._symbols_files_hash = files_hash + return symbols + + def _get_symbol_completions(self, prefix: str) -> list[str]: + """Get symbol completions for @ mentions.""" + symbols = self._extract_symbols() + prefix_lower = prefix.lower() + + if prefix: + matches = [s for s in symbols if prefix_lower in s.lower()] + else: + matches = list(symbols) + + return sorted(matches)[:50] + + def _get_path_completions(self, prefix: str) -> list[str]: + """Get filesystem path completions relative to coder root.""" + from pathlib import Path + + coder = self.worker.coder + root = Path(coder.root) if hasattr(coder, 'root') else Path.cwd() + + # Handle the prefix - could be partial path like "src/ma" or just "ma" + if "/" in prefix: + # Has directory component + dir_part, file_part = prefix.rsplit("/", 1) + search_dir = root / dir_part + search_prefix = file_part.lower() + path_prefix = dir_part + "/" 
+ else: + search_dir = root + search_prefix = prefix.lower() + path_prefix = "" + + completions = [] + try: + if search_dir.exists() and search_dir.is_dir(): + for entry in search_dir.iterdir(): + name = entry.name + if search_prefix and search_prefix not in name.lower(): + continue + # Add trailing slash for directories + if entry.is_dir(): + completions.append(path_prefix + name + "/") + else: + completions.append(path_prefix + name) + except (PermissionError, OSError): + pass + + return sorted(completions) + + def _get_suggestions(self, text: str) -> list[str]: + """Get completion suggestions for given text.""" + input_area = self.query_one("#input", InputArea) + suggestions = [] + commands = self.worker.coder.commands + + if text.startswith("/"): + # Command completion + parts = text.split(maxsplit=1) + cmd_part = parts[0] + + if len(parts) == 1 and not text.endswith(" "): + # Complete command name + all_commands = commands.get_commands() + if cmd_part == "/": + suggestions = all_commands + else: + suggestions = [c for c in all_commands if c.startswith(cmd_part)] + else: + # Complete command argument + cmd_name = cmd_part + arg_prefix = parts[1] if len(parts) > 1 else "" + arg_prefix_lower = arg_prefix.lower() + + # Check if this command needs path-based completion + if cmd_name in self.PATH_COMPLETION_COMMANDS: + suggestions = self._get_path_completions(arg_prefix) + # For /read-only and /read-only-stub, also include add completions + if cmd_name in {"/read-only", "/read-only-stub"}: + try: + add_completions = commands.get_completions("/add") or [] + for c in add_completions: + if arg_prefix_lower in str(c).lower() and str(c) not in suggestions: + suggestions.append(str(c)) + except Exception: + pass + else: + # Use standard command completions (no file fallback) + try: + cmd_completions = commands.get_completions(cmd_name) + if cmd_completions: + if arg_prefix: + suggestions = [c for c in cmd_completions if arg_prefix_lower in str(c).lower()] + else: + 
suggestions = list(cmd_completions) + except Exception: + pass + elif "@" in text: + # Symbol completion triggered by @ + # Find the @ and get the prefix after it + at_index = text.rfind("@") + prefix = text[at_index + 1:] + suggestions = self._get_symbol_completions(prefix) + # No file completion for regular text - use @ for files/symbols + + return [str(s) for s in suggestions[:50]] + + def on_input_area_completion_requested(self, message: InputArea.CompletionRequested): + """Handle completion request - show or update completion bar.""" + input_area = self.query_one("#input", InputArea) + text = message.text + suggestions = self._get_suggestions(text) + + # Check if completion bar already exists + existing_bar = None + try: + existing_bar = self.query_one("#completion-bar", CompletionBar) + except Exception: + pass + + if suggestions: + input_area.completion_active = True + if existing_bar: + # Update existing bar in place + existing_bar.update_suggestions(suggestions, text) + else: + # Create new completion bar + completion_bar = CompletionBar( + suggestions=suggestions, + prefix=text, + id="completion-bar" + ) + self.mount(completion_bar, before=input_area) + else: + # No suggestions - dismiss if active + input_area.completion_active = False + if existing_bar: + existing_bar.remove() + + def on_input_area_completion_cycle(self, message: InputArea.CompletionCycle): + """Handle Tab to cycle through completions.""" + try: + completion_bar = self.query_one("#completion-bar", CompletionBar) + completion_bar.cycle_next() + except Exception: + pass + + def on_input_area_completion_accept(self, message: InputArea.CompletionAccept): + """Handle Enter to accept current completion.""" + try: + completion_bar = self.query_one("#completion-bar", CompletionBar) + completion_bar.select_current() + except Exception: + pass + + def on_input_area_completion_dismiss(self, message: InputArea.CompletionDismiss): + """Handle Escape to dismiss completions.""" + input_area = 
"""TextualInputOutput - IO adapter for Textual TUI."""

import asyncio
import queue
import time

from rich.console import Console

from aider.io import InputOutput


class TextualInputOutput(InputOutput):
    """InputOutput subclass that communicates with the Textual TUI via queues.

    The aider worker thread calls these overrides; each one forwards output
    and state changes to the TUI through ``output_queue`` and reads user
    responses back from ``input_queue``.
    """

    def __init__(self, output_queue, input_queue, **kwargs):
        """Initialize TextualInputOutput.

        Args:
            output_queue: queue.Queue for sending output to TUI
            input_queue: queue.Queue for receiving input from TUI
            **kwargs: Passed to InputOutput parent class
        """
        # Initialize parent (fancy_input should already be False from caller)
        super().__init__(**kwargs)

        # Store queues
        self.output_queue = output_queue
        self.input_queue = input_queue

        # Lazy-initialized console for TUI rendering
        self._tui_console = None

        # Current task tracking
        self.current_task_id = None

        # LLM response streaming state
        self._streaming_response = False

        # (marker substring, task type) pairs used to detect task boundaries
        # in tool output.
        self.task_markers = [
            ('Tool:', 'tool'),
            ('Running', 'execution'),
            ('Git:', 'git'),
            ('Linting', 'lint'),
            ('Testing', 'test'),
            ('Adding', 'file_op'),
            ('Removing', 'file_op'),
        ]

    def _detect_task_start(self, text):
        """Detect if this output should start a new task.

        Args:
            text: Output text to check

        Returns:
            Tuple of (should_start, title, task_type) or (False, None, None)
        """
        for marker, task_type in self.task_markers:
            if marker in text:
                # Title is the first line of the output, capped at 50 chars
                title = text.split('\n')[0][:50]
                return True, title, task_type
        return False, None, None

    def start_task(self, title, task_type='general'):
        """Start a new output task.

        Args:
            title: Task title
            task_type: Type of task
        """
        self.current_task_id = f"task_{time.time()}"
        self.output_queue.put({
            'type': 'start_task',
            'task_id': self.current_task_id,
            'title': title,
            'task_type': task_type,
        })

    def _get_tui_console(self):
        """Get or create the Rich console used to render output for the TUI."""
        if self._tui_console is None:
            self._tui_console = Console(
                force_terminal=True,
                color_system="truecolor",
            )
        return self._tui_console

    def stream_print(self, *messages, **kwargs):
        """Override stream_print to send Rich-rendered output to the TUI queue.

        Args:
            *messages: Messages to print
            **kwargs: Additional arguments for console.print
        """
        # Capture Rich rendering with forced ANSI output
        console = self._get_tui_console()
        with console.capture() as capture:
            console.print(*messages, **kwargs)
        text = capture.get()

        self.output_queue.put({
            'type': 'output',
            'text': text,
            'task_id': self.current_task_id,
        })

    def stream_output(self, text, final=False):
        """Override stream_output to send streaming LLM text to the TUI.

        Args:
            text: Text chunk to stream
            final: Whether this is the final chunk of the response
        """
        # Start response on first non-empty chunk
        if not self._streaming_response and text:
            self._streaming_response = True
            self.output_queue.put({'type': 'start_response'})

        if text:
            self.output_queue.put({
                'type': 'stream_chunk',
                'text': text,
            })

        # End response on final chunk
        if final and self._streaming_response:
            self._streaming_response = False
            self.output_queue.put({'type': 'end_response'})

    def reset_streaming_response(self):
        """Reset streaming state between responses."""
        if self._streaming_response:
            self._streaming_response = False
            self.output_queue.put({'type': 'end_response'})

    def tool_output(self, *messages, **kwargs):
        """Override tool_output to detect task boundaries before queuing output.

        Args:
            *messages: Messages to output
            **kwargs: Additional arguments
        """
        if messages:
            text = ' '.join(str(m) for m in messages)

            # Some tool output marks the beginning of a new logical task
            should_start, title, task_type = self._detect_task_start(text)
            if should_start:
                self.start_task(title, task_type)

        # Parent handles logging and the actual output path
        super().tool_output(*messages, **kwargs)

    def start_spinner(self, text, update_last_text=True):
        """Override start_spinner to send spinner state to the TUI.

        Args:
            text: Spinner text
            update_last_text: Whether to update last_spinner_text
        """
        # Keep parent state in sync
        super().start_spinner(text, update_last_text)

        self.output_queue.put({
            'type': 'spinner',
            'action': 'start',
            'text': text,
        })

    def update_spinner(self, text):
        """Override update_spinner to send updates to the TUI.

        Args:
            text: New spinner text
        """
        super().update_spinner(text)

        self.output_queue.put({
            'type': 'spinner',
            'action': 'update',
            'text': text,
        })

    def stop_spinner(self):
        """Override stop_spinner to send stop state to the TUI."""
        super().stop_spinner()

        self.output_queue.put({
            'type': 'spinner',
            'action': 'stop',
        })

    async def _wait_for_key(self, key, poll_interval=0.1):
        """Wait for the first input-queue message that contains *key*.

        Polls with a non-blocking ``get_nowait`` plus an ``asyncio.sleep``
        so the event loop stays responsive; a blocking ``get(timeout=...)``
        here would stall every other task for the timeout duration on each
        empty poll. Messages without *key* are discarded, matching the
        previous behavior.

        Args:
            key: Dict key to wait for in incoming messages
            poll_interval: Seconds to sleep between empty polls

        Returns:
            The value stored under *key* in the first matching message.
        """
        while True:
            try:
                result = self.input_queue.get_nowait()
            except queue.Empty:
                await asyncio.sleep(poll_interval)
                continue
            if key in result:
                return result[key]

    async def get_input(
        self,
        root,
        rel_fnames,
        addable_rel_fnames,
        commands,
        abs_read_only_fnames=None,
        abs_read_only_stubs_fnames=None,
        edit_format=None,
    ):
        """Override get_input to get input from the TUI instead of prompt_toolkit.

        Args:
            root: Project root directory
            rel_fnames: Relative filenames in chat
            addable_rel_fnames: Files that can be added
            commands: Commands object
            abs_read_only_fnames: Read-only files
            abs_read_only_stubs_fnames: Stub files
            edit_format: Edit format string

        Returns:
            User input string
        """
        # Signal TUI that we're ready for input
        command_names = commands.get_commands() if commands else []

        self.output_queue.put({
            'type': 'ready_for_input',
            'files': list(addable_rel_fnames) if addable_rel_fnames else [],
            'commands': command_names,
        })

        # Wait (cooperatively) for the TUI to send the user's text
        user_input = await self._wait_for_key('text')

        # Log the input (same as parent)
        self.user_input(user_input)

        return user_input

    async def confirm_ask(
        self,
        question,
        default="y",
        subject=None,
        explicit_yes_required=False,
        group=None,
        group_response=None,
        allow_never=False,
        allow_tweak=False,
        acknowledge=False,
    ):
        """Override confirm_ask to show a modal instead of an inline prompt.

        Args:
            question: Question to ask
            default: Default response
            subject: Optional subject/context
            explicit_yes_required: Require explicit yes
            group: Confirmation group
            group_response: Group response key
            allow_never: Allow "don't ask again"
            allow_tweak: Allow "tweak" option
            acknowledge: Require acknowledgement

        Returns:
            User's response (True, False, "tweak", etc.)
        """
        # Short-circuit if already answered via "never" or a group preference
        question_id = (question, subject)
        if question_id in self.never_prompts:
            return False

        if group and group.preference:
            return group.preference == 'all'

        if group_response and group_response in self.group_responses:
            return self.group_responses[group_response]

        self.num_user_asks += 1

        # Send confirmation request to TUI
        self.output_queue.put({
            'type': 'confirmation',
            'question': question,
            'subject': subject,
            'options': {
                'default': default,
                'explicit_yes_required': explicit_yes_required,
                'group': group,
                'group_response': group_response,
                'allow_never': allow_never,
                'allow_tweak': allow_tweak,
                'acknowledge': acknowledge,
            }
        })

        # Wait (cooperatively) for the TUI's answer
        response = await self._wait_for_key('confirmed')

        if response == 'never':
            self.never_prompts.add(question_id)
            return False
        if response == 'tweak':
            return 'tweak'
        if response == 'all':
            if group:
                group.preference = 'all'
            if group_response:
                self.group_responses[group_response] = True
            return True
        if response == 'skip':
            if group:
                group.preference = 'skip'
            if group_response:
                self.group_responses[group_response] = False
            return False

        # Regular boolean response
        return bool(response)

    async def stop_task_streams(self):
        """Override to avoid asyncio issues in the worker thread.

        The TUI doesn't use the same parallel streaming, so this is a no-op.
        """
        pass

    async def stop_input_task(self):
        """Override to avoid asyncio issues in the worker thread."""
        pass

    async def stop_output_task(self):
        """Override to avoid asyncio issues in the worker thread."""
        pass

    def request_exit(self):
        """Request the TUI to exit gracefully.

        This sends an exit signal to the TUI instead of calling sys.exit()
        directly, allowing Textual to properly restore terminal state.
        """
        self.output_queue.put({'type': 'exit'})
"""Completion bar widget for autocomplete suggestions."""

import os

from textual.app import ComposeResult
from textual.message import Message
from textual.widget import Widget
from textual.widgets import Static


class CompletionBar(Widget, can_focus=False):
    """Bar showing autocomplete suggestions above input (non-focusable)."""

    # Hard cap on how many suggestions are ever tracked
    MAX_SUGGESTIONS = 50
    # How many suggestion items are rendered at once
    WINDOW_SIZE = 6

    DEFAULT_CSS = """
    CompletionBar {
        height: 1;
        background: $surface;
        margin: 0 1;
        padding: 0 1;
        layout: horizontal;
    }

    CompletionBar .completion-prefix {
        width: auto;
        height: 1;
        margin-right: 1;
        color: $secondary;
        background: $surface;
    }

    CompletionBar .completion-item {
        width: auto;
        height: 1;
        margin-right: 2;
        color: $secondary;
        background: $surface;
    }

    CompletionBar .completion-item.selected {
        color: $primary;
        text-style: bold;
    }

    CompletionBar .completion-more {
        width: auto;
        height: 1;
        margin-right: 1;
        color: $panel;
    }

    CompletionBar .completion-hint {
        width: auto;
        height: 1;
        color: $panel;
        dock: right;
    }
    """

    class Selected(Message):
        """Completion selected message."""

        def __init__(self, value: str):
            self.value = value
            super().__init__()

    class Dismissed(Message):
        """Completion bar dismissed."""
        pass

    def __init__(self, suggestions: list[str] | None = None, prefix: str = "", **kwargs):
        """Initialize completion bar.

        Args:
            suggestions: List of completion suggestions
            prefix: Current input prefix to complete from
        """
        super().__init__(**kwargs)
        self.suggestions = (suggestions or [])[:self.MAX_SUGGESTIONS]
        self.prefix = prefix
        self.selected_index = 0
        self._item_widgets: list[Static] = []
        self._prefix_widget: Static | None = None
        self._left_more: Static | None = None
        self._right_more: Static | None = None
        self._hint: Static | None = None

        # Common directory prefix (shown once) and the shortened names
        # displayed for each suggestion.
        self._common_prefix = ""
        self._display_names: list[str] = []
        self._compute_display_names()

    def _compute_display_names(self) -> None:
        """Compute common directory prefix and short display names."""
        if not self.suggestions:
            self._common_prefix = ""
            self._display_names = []
            return

        # Only path-like suggestion lists (containing "/") get prefix folding;
        # commands and other plain items are shown as-is.
        if not any("/" in s for s in self.suggestions):
            self._common_prefix = ""
            self._display_names = self.suggestions[:]
            return

        dirs = [os.path.dirname(s) for s in self.suggestions]
        if dirs and all(d == dirs[0] for d in dirs) and dirs[0]:
            # All suggestions live in the same directory
            self._common_prefix = dirs[0] + "/"
            self._display_names = [os.path.basename(s) for s in self.suggestions]
            return

        # Fall back to the longest common path prefix.
        # os.path.commonpath raises ValueError when the paths mix absolute
        # and relative forms (and, on Windows, when drives differ) -- treat
        # that as "no common prefix" rather than crashing the widget.
        try:
            common = os.path.commonpath(self.suggestions)
        except ValueError:
            common = ""

        if common and "/" in common:
            # Use the directory part of the common prefix
            self._common_prefix = common.rsplit("/", 1)[0] + "/"
            if self._common_prefix:
                self._display_names = [s[len(self._common_prefix):] for s in self.suggestions]
            else:
                self._display_names = self.suggestions[:]
        else:
            self._common_prefix = ""
            self._display_names = self.suggestions[:]

    def compose(self) -> ComposeResult:
        """Create the bar layout."""
        # Directory prefix (shown once)
        self._prefix_widget = Static(self._common_prefix, classes="completion-prefix")
        self._prefix_widget.display = bool(self._common_prefix)
        yield self._prefix_widget

        self._left_more = Static("…", classes="completion-more")
        self._left_more.display = False
        yield self._left_more

        self._item_widgets = []
        for i in range(self.WINDOW_SIZE):
            if i < len(self._display_names):
                classes = "completion-item selected" if i == 0 else "completion-item"
                item = Static(self._display_names[i], classes=classes)
            else:
                item = Static("", classes="completion-item")
                item.display = False
            self._item_widgets.append(item)
            yield item

        # Show "+N more" instead of just an ellipsis
        remaining = len(self.suggestions) - self.WINDOW_SIZE
        more_text = f"+{remaining}" if remaining > 0 else ""
        self._right_more = Static(more_text, classes="completion-more")
        self._right_more.display = remaining > 0
        yield self._right_more

        self._hint = Static("Tab ↹ Enter ⏎ Esc ✗", classes="completion-hint")
        yield self._hint

    def update_suggestions(self, suggestions: list[str], prefix: str = "") -> None:
        """Update suggestions in place."""
        self.suggestions = suggestions[:self.MAX_SUGGESTIONS]
        self.prefix = prefix
        self.selected_index = 0

        # Recompute display names for the new suggestion set
        self._compute_display_names()

        if self._prefix_widget:
            self._prefix_widget.update(self._common_prefix)
            self._prefix_widget.display = bool(self._common_prefix)

        self._refresh_items()
        self._set_selection_classes()

    def _refresh_items(self) -> None:
        """Update visible items - selected item always shown first."""
        # Ensure meta widgets exist (they may have been removed with the bar)
        if self._left_more is None or self._left_more.parent is None:
            self._left_more = Static("", classes="completion-more")
            self.mount(self._left_more, before=self._item_widgets[0] if self._item_widgets else None)
        if self._right_more is None or self._right_more.parent is None:
            self._right_more = Static("", classes="completion-more")
            self.mount(self._right_more, after=self._left_more if self._left_more else None)
        if self._hint is None or self._hint.parent is None:
            self._hint = Static("Tab ↹ Enter ⏎ Esc ✗", classes="completion-hint")
            self.mount(self._hint)

        # Grow the widget list to the window size
        while len(self._item_widgets) < self.WINDOW_SIZE:
            new_item = Static("", classes="completion-item")
            self._item_widgets.append(new_item)
            target = self._right_more if self._right_more and self._right_more.parent else self._hint
            self.mount(new_item, before=target)

        if not self._display_names:
            for item in self._item_widgets:
                item.display = False
            if self._left_more:
                self._left_more.display = False
            if self._right_more:
                self._right_more.display = False
            return

        # Build display order: selected item first, then the items after it
        total = len(self._display_names)
        items_before = self.selected_index

        # Show indicator if there are items before the selected one
        if self._left_more:
            if items_before > 0:
                self._left_more.update(f"{items_before}+")
                self._left_more.display = True
            else:
                self._left_more.display = False

        # Fill the window: selected first, then subsequent items
        window_size = min(self.WINDOW_SIZE, total)
        visible_indices = [self.selected_index]
        for i in range(1, window_size):
            next_idx = self.selected_index + i
            if next_idx < total:
                visible_indices.append(next_idx)

        # Update item widgets from the visible window
        for i, item in enumerate(self._item_widgets):
            if i < len(visible_indices):
                display_index = visible_indices[i]
                item.update(self._display_names[display_index])
                item.display = True
            else:
                item.display = False

        # Show indicator for remaining items after the visible window
        remaining_after = total - (self.selected_index + len(visible_indices))
        if self._right_more:
            if remaining_after > 0:
                self._right_more.update(f"+{remaining_after}")
                self._right_more.display = True
            else:
                self._right_more.display = False

    def _set_selection_classes(self) -> None:
        """Apply selected class - first visible item is always selected."""
        for i, item in enumerate(self._item_widgets):
            if not item.display:
                item.remove_class("selected")
                continue
            # First item is always the selected one
            if i == 0:
                item.add_class("selected")
            else:
                item.remove_class("selected")

    def _update_selection(self) -> None:
        """Update visual selection state."""
        if not self.suggestions:
            return
        self._refresh_items()
        self._set_selection_classes()

    def cycle_next(self) -> None:
        """Cycle to next suggestion."""
        if self.suggestions:
            self.selected_index = (self.selected_index + 1) % len(self.suggestions)
            self._update_selection()

    def select_current(self) -> None:
        """Select current suggestion and dismiss."""
        if self.suggestions:
            self.post_message(self.Selected(self.suggestions[self.selected_index]))
        self.remove()

    def dismiss(self) -> None:
        """Dismiss without selecting."""
        self.post_message(self.Dismissed())
        self.remove()
"""Footer widget for Aider TUI."""

from rich.text import Text
from textual.reactive import reactive
from textual.widgets import Static


class AiderFooter(Static):
    """Footer showing mode, model, project, git, and cost."""

    # Left side info
    aider_mode = reactive("code")
    model_name = reactive("")

    # Right side info
    project_name = reactive("")
    git_branch = reactive("")
    git_dirty = reactive(0)
    cost = reactive(0.0)

    # Spinner state
    spinner_text = reactive("")
    spinner_visible = reactive(False)
    _spinner_frame = 0
    _spinner_chars = "◐◓◑◒"

    def __init__(
        self,
        model_name: str = "",
        project_name: str = "",
        git_branch: str = "",
        aider_mode: str = "code",
        **kwargs
    ):
        """Initialize footer.

        Args:
            model_name: Name of the AI model
            project_name: Name of the project folder
            git_branch: Current git branch name
            aider_mode: Current edit mode (code, agent, architect, etc.)
        """
        super().__init__(**kwargs)
        self.model_name = model_name
        self.project_name = project_name
        self.git_branch = git_branch
        self.aider_mode = aider_mode
        self._spinner_interval = None

    def on_mount(self):
        """Kick off the spinner animation timer once mounted."""
        self._spinner_interval = self.set_interval(0.1, self._animate_spinner)

    def _animate_spinner(self):
        """Advance the spinner one frame (only while visible)."""
        if self.spinner_visible:
            self._spinner_frame = (self._spinner_frame + 1) % len(self._spinner_chars)
            self.refresh()

    def _get_display_model(self) -> str:
        """Return a shortened model name suitable for the footer."""
        if not self.model_name:
            return ""
        # Drop provider prefixes such as "openrouter/x-ai/" and keep the
        # final path segment, then truncate long names with an ellipsis.
        display = self.model_name.rsplit("/", 1)[-1]
        if len(display) > 25:
            display = display[:22] + "..."
        return display

    def render(self) -> Text:
        """Render the footer: left-side status, padding, right-side status."""
        lhs = self._render_left()
        rhs = self._render_right()

        # Right-align the rhs by padding with spaces between the halves.
        try:
            width = self.size.width
        except Exception:
            width = 80
        gap = max(1, width - len(lhs.plain) - len(rhs.plain))

        combined = Text()
        combined.append_text(lhs)
        combined.append(" " * gap)
        combined.append_text(rhs)
        return combined

    def _render_left(self) -> Text:
        """Build the left half: spinner, mode tag, and model name."""
        lhs = Text()

        if self.spinner_visible:
            frame = self._spinner_chars[self._spinner_frame]
            lhs.append(f"{frame} ", style="bold #000000")
            if self.spinner_text:
                lhs.append(self.spinner_text, style="bold #000000")
                lhs.append(" ")

        if self.aider_mode:
            lhs.append(f"[{self.aider_mode}]", style="bold #003300")
            lhs.append(" ")

        shown_model = self._get_display_model()
        if shown_model:
            lhs.append(shown_model, style="#003300")

        return lhs

    def _render_right(self) -> Text:
        """Build the right half: project, git branch/dirty count, and cost."""
        rhs = Text()

        if self.project_name:
            rhs.append(self.project_name, style="#003300")
            rhs.append(" ")

        if self.git_branch:
            rhs.append(self.git_branch, style="bold #000000")
            if self.git_dirty:
                rhs.append(f" +{self.git_dirty}", style="bold #552200")
            rhs.append(" ")

        # Cost is always displayed
        rhs.append(f"${self.cost:.2f}", style="#000000")

        return rhs

    def update_cost(self, cost: float):
        """Update the displayed cost."""
        self.cost = cost
        self.refresh()

    def update_git(self, branch: str, dirty_count: int = 0):
        """Update git status display."""
        self.git_branch = branch
        self.git_dirty = dirty_count
        self.refresh()

    def update_mode(self, mode: str):
        """Update the chat mode display."""
        self.aider_mode = mode
        self.refresh()

    def start_spinner(self, text: str = ""):
        """Show spinner with optional text."""
        self.spinner_text = text
        self.spinner_visible = True
        self.refresh()

    def stop_spinner(self):
        """Hide spinner."""
        self.spinner_visible = False
        self.spinner_text = ""
        self.refresh()
stop_spinner(self): + """Hide spinner.""" + self.spinner_visible = False + self.spinner_text = "" + self.refresh() diff --git a/aider/tui/widgets/input_area.py b/aider/tui/widgets/input_area.py new file mode 100644 index 00000000000..d9b14c43056 --- /dev/null +++ b/aider/tui/widgets/input_area.py @@ -0,0 +1,181 @@ +"""Input widget for Aider TUI.""" + +from prompt_toolkit.history import FileHistory +from textual.message import Message +from textual.widgets import Input + + +class InputArea(Input): + """Input widget with autocomplete and history support.""" + + class CompletionRequested(Message): + """User requested completion (Tab key or auto-trigger).""" + + def __init__(self, text: str): + self.text = text + super().__init__() + + class CompletionCycle(Message): + """User wants to cycle through completions.""" + pass + + class CompletionAccept(Message): + """User wants to accept current completion.""" + pass + + class CompletionDismiss(Message): + """User wants to dismiss completions.""" + pass + + def __init__(self, history_file: str = None, **kwargs): + """Initialize input area. + + Args: + history_file: Path to input history file for up/down navigation + """ + super().__init__( + placeholder="> Type your message...", + **kwargs + ) + self.files = [] + self.commands = [] + self.completion_active = False + + # History support - lazy loaded + self.history_file = history_file + self._history: list[str] | None = None # None = not loaded yet + self._history_index = -1 # -1 = not navigating, 0+ = position in history + self._saved_input = "" # Saves current input when navigating history + + def _ensure_history_loaded(self) -> list[str]: + """Lazily load history on first access. + + Returns history with most recent at the end (index -1). 
+ """ + if self._history is None: + self._history = [] + if self.history_file: + try: + # FileHistory returns most recent first, so reverse it + self._history = list(reversed(list(FileHistory(self.history_file).load_history_strings()))) + except (OSError, IOError): + pass # History file doesn't exist yet or can't be read + return self._history + + def update_autocomplete_data(self, files, commands): + """Update autocomplete suggestions. + + Args: + files: List of file paths for autocomplete + commands: List of command names for autocomplete + """ + self.files = files + self.commands = commands + + def save_to_history(self, text: str) -> None: + """Save input to history file and in-memory list. + + Args: + text: The input text to save + """ + # Skip empty, whitespace-only, or very short inputs + if not text or not text.strip() or len(text.strip()) <= 1: + return + + # Skip if same as last history entry + history = self._ensure_history_loaded() + if history and history[-1] == text: + return + + # Save to file + if self.history_file: + try: + FileHistory(self.history_file).append_string(text) + except (OSError, IOError): + pass + + # Add to in-memory history + history.append(text) + + # Reset navigation state + self._history_index = -1 + self._saved_input = "" + + def _history_prev(self) -> None: + """Navigate to previous (older) history entry.""" + history = self._ensure_history_loaded() + if not history: + return + + # Save current input when first entering history + if self._history_index == -1: + self._saved_input = self.value + self._history_index = len(history) - 1 + elif self._history_index > 0: + self._history_index -= 1 + else: + return # Already at oldest + + self.value = history[self._history_index] + self.cursor_position = len(self.value) + + def _history_next(self) -> None: + """Navigate to next (newer) history entry.""" + if self._history_index == -1: + return # Not navigating history + + history = self._ensure_history_loaded() + if self._history_index < 
len(history) - 1: + self._history_index += 1 + self.value = history[self._history_index] + else: + # Back to current input + self._history_index = -1 + self.value = self._saved_input + + self.cursor_position = len(self.value) + + def on_key(self, event) -> None: + """Handle keys for completion and history navigation.""" + if self.disabled: + return + + if event.key == "tab": + event.stop() + event.prevent_default() + if self.completion_active: + # Cycle through completions + self.post_message(self.CompletionCycle()) + else: + # Request completions + self.post_message(self.CompletionRequested(self.value)) + elif event.key == "escape" and self.completion_active: + event.stop() + event.prevent_default() + self.post_message(self.CompletionDismiss()) + elif event.key == "up": + # Navigate to previous history entry + event.stop() + event.prevent_default() + self._history_prev() + elif event.key == "down": + # Navigate to next history entry + event.stop() + event.prevent_default() + self._history_next() + + def on_input_changed(self, event) -> None: + """Update completions as user types.""" + if not self.disabled: + # Auto-trigger for slash commands, @ symbols, or update existing completions + if event.value.startswith("/") or "@" in event.value or self.completion_active: + self.post_message(self.CompletionRequested(event.value)) + + def on_input_submitted(self, event) -> None: + """Handle Enter key - accept completion if active.""" + if self.completion_active: + # Let app handle accepting completion + self.post_message(self.CompletionAccept()) + # Prevent the default submit behavior + event.stop() + event.prevent_default() diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py index 6e89912c6b8..25c3c41cb1b 100644 --- a/aider/tui/widgets/output.py +++ b/aider/tui/widgets/output.py @@ -1,12 +1,10 @@ -"""Output widgets for Aider TUI.""" +"""Output widget for Aider TUI using Textual's Markdown widget.""" import re -from io import StringIO -from rich.console 
import Console -from rich.text import Text +from textual.containers import VerticalScroll from textual.message import Message -from textual.widgets import RichLog +from textual.widgets import Markdown, Static class CostUpdate(Message): @@ -17,148 +15,154 @@ def __init__(self, cost: float): super().__init__() -class OutputContainer(RichLog): - """Single scrollable output area.""" +class OutputContainer(VerticalScroll): + """Scrollable output area using Markdown widgets for rich rendering. + + Uses Textual's native Markdown widget with MarkdownStream for + efficient streaming of LLM responses. + """ + + DEFAULT_CSS = """ + OutputContainer { + scrollbar-gutter: stable; + background: $background; + } + + OutputContainer > Markdown { + margin: 0 1; + padding: 0; + background: $background; + } + + OutputContainer > .user-message { + margin: 1 1 0 1; + padding: 0; + color: $primary; + background: $background; + } + + OutputContainer > .system-message { + margin: 0 1; + padding: 0; + color: $secondary; + background: $background; + } + """ def __init__(self, **kwargs): - """Initialize output container.""" - super().__init__(auto_scroll=True, wrap=True, markup=True, **kwargs) - def start_task(self, task_id: str, title: str, task_type: str = "general"): - """Start a new task section (visual separator).""" - self._flush_buffer() - self.write(Text(f"\n─── {title} ───", style="dim")) - - def add_output(self, text: str, task_id: str = None): - """Add output text with intelligent whitespace handling.""" - if not text: - return - - # Clean up the text first - text = self._normalize_whitespace(text) - text = self._format_markdown(text) - text = self._format_code_blocks(text) - text = self._format_message_type(text) - - # Buffer for complete lines - self._stream_buffer += text - - # Only flush complete lines (RichLog.write adds newline after each call) - while '\n' in self._stream_buffer: - line, self._stream_buffer = self._stream_buffer.split('\n', 1) - self._write_line(line) -class 
CostUpdate(Message): - """Message to update cost in footer.""" + super().__init__(**kwargs) + self._current_markdown: Markdown | None = None + self._stream = None + self._buffer = "" - def __init__(self, cost: float): - self.cost = cost - super().__init__() + async def start_response(self): + """Start a new LLM response section with streaming support.""" + # Stop any existing stream + await self._stop_stream() + # Create new Markdown widget for this response + self._current_markdown = Markdown("", id=f"response-{len(self.children)}") + await self.mount(self._current_markdown) -class OutputContainer(RichLog): - """Single scrollable output area.""" + # Create stream for efficient updates + self._stream = Markdown.get_stream(self._current_markdown) + self._buffer = "" - def __init__(self, **kwargs): - """Initialize output container.""" - super().__init__(auto_scroll=True, wrap=True, markup=True, **kwargs) - self._stream_buffer = "" + # Keep scrolled to bottom + self.anchor() - def start_task(self, task_id: str, title: str, task_type: str = "general"): - """Start a new task section (visual separator).""" - self._flush_buffer() - self.write(Text(f"\n─── {title} ───", style="dim")) - - def add_output(self, text: str, task_id: str = None): - """Add output text, buffering for complete lines.""" + async def stream_chunk(self, text: str): + """Stream a chunk of markdown text.""" if not text: return - self._stream_buffer += text - - # Only flush complete lines (RichLog.write adds newline after each call) - while '\n' in self._stream_buffer: - line, self._stream_buffer = self._stream_buffer.split('\n', 1) - self._write_line(line) + # Check for cost updates in the text + self._check_cost(text) + + if self._stream: + # Use MarkdownStream for efficient batched updates + await self._stream.write(text) + elif self._current_markdown: + # Fallback: append to buffer and update + self._buffer += text + await self._current_markdown.update(self._buffer) + else: + # No active response - 
start one + await self.start_response() + await self.stream_chunk(text) + + async def end_response(self): + """End the current LLM response.""" + await self._stop_stream() + + async def _stop_stream(self): + """Stop the current markdown stream.""" + if self._stream: + try: + await self._stream.stop() + except Exception: + pass + self._stream = None + + def add_user_message(self, text: str): + """Add a user message (displayed differently from LLM output).""" + # User messages shown with > prefix, markup disabled to avoid parsing issues + static = Static(f"> {text}", classes="user-message", markup=False) + self.mount(static) + self.scroll_end(animate=False) + + def add_system_message(self, text: str): + """Add a system/tool message.""" + if not text.strip(): + return - def _flush_buffer(self): - """Flush any remaining buffered content.""" - if self._stream_buffer: - self._write_line(self._stream_buffer) - self._stream_buffer = "" + # Strip ANSI codes + text = re.sub(r'\x1b\[[0-9;]*m', '', text) + # Strip Rich markup tags like [blue], [/bold], etc. + text = re.sub(r'\[/?[a-zA-Z0-9_ #/]+\]', '', text) - def _write_line(self, text: str): - """Write a single line to the display.""" - if not text: + if not text.strip(): return - # Check for cost information and emit update - self._check_for_cost(text) + # Create Static with markup disabled to avoid Rich parsing issues + static = Static(text, classes="system-message", markup=False) + self.mount(static) + self.scroll_end(animate=False) - # Strip Rich markup tags like [blue], [/], [bold], etc. - text = self._strip_rich_markup(text) + def add_output(self, text: str, task_id: str = None): + """Add output text as a system message. - # Handle ANSI codes first - if '\x1b' in text: - self.write(Text.from_ansi(text)) + This handles tool output, status messages, etc. + LLM streaming is handled separately via start_response/stream_chunk/end_response. 
+ """ + if not text: return - # Try to render as markdown for formatting - if self._has_markdown(text): - try: - from rich.markdown import Markdown - width = max(self.size.width - 2, 40) - console = Console(file=StringIO(), force_terminal=True, width=width) - md = Markdown(text) - with console.capture() as capture: - console.print(md, end="") - rendered = capture.get().rstrip('\n') - if rendered: - self.write(Text.from_ansi(rendered)) - return - except Exception: - pass - - # Fallback to plain text - self.write(Text(text)) + # Check for cost updates + self._check_cost(text) - def _has_markdown(self, text: str) -> bool: - """Check if text has markdown formatting.""" - # Look for common markdown patterns - patterns = ['**', '__', '`', '```', '##', '- ', '* ', '1. '] - return any(p in text for p in patterns) + # Always treat add_output as system messages + # LLM streaming goes through the dedicated stream_chunk path + self.add_system_message(text) - def _check_for_cost(self, text: str): - """Check for cost info and post message to update footer.""" - # Look for pattern like "$0.0086 session" or "$X.XX session" - match = re.search(r'\$(\d+\.?\d*)\s*session', text) + def _check_cost(self, text: str): + """Extract and emit cost updates.""" + match = re.search(r"\$(\d+\.?\d*)\s*session", text) if match: try: - cost = float(match.group(1)) - self.post_message(CostUpdate(cost)) + self.post_message(CostUpdate(float(match.group(1)))) except (ValueError, AttributeError): pass - def _strip_rich_markup(self, text: str) -> str: - """Remove Rich console markup tags from text.""" - # Pattern matches [tagname] and [/tagname] and [/] - # Common tags: [blue], [bold], [red], [green], [/], [/blue], etc. 
- pattern = r'\[/?(?:blue|red|green|yellow|bold|dim|italic|underline|strike|reverse|blink|/)*\]' - return re.sub(pattern, '', text) - - def add_markdown(self, text: str): - """Add markdown content (renders via Rich).""" - self._flush_buffer() - try: - console = Console(file=StringIO(), force_terminal=True, width=self.size.width - 4) - from rich.markdown import Markdown - md = Markdown(text) - with console.capture() as capture: - console.print(md) - rendered = capture.get() - self.write(Text.from_ansi(rendered)) - except Exception: - self.write(Text(text)) + def start_task(self, task_id: str, title: str, task_type: str = "general"): + """Start a new task section.""" + static = Static(f"\n{title}", classes="system-message", markup=False) + self.mount(static) + self.scroll_end(animate=False) def clear_output(self): """Clear all output.""" - self._stream_buffer = "" - self.clear() \ No newline at end of file + self._current_markdown = None + self._stream = None + self._buffer = "" + self.remove_children() diff --git a/aider/tui/widgets/status_bar.py b/aider/tui/widgets/status_bar.py new file mode 100644 index 00000000000..9b8d64ea58f --- /dev/null +++ b/aider/tui/widgets/status_bar.py @@ -0,0 +1,228 @@ +"""Unified status bar widget for notifications and confirmations.""" + +from textual.app import ComposeResult +from textual.containers import Horizontal +from textual.message import Message +from textual.reactive import reactive +from textual.widget import Widget +from textual.widgets import Static + + +class StatusBar(Widget, can_focus=True): + """Unified status bar for notifications, confirmations, and prompts. 
+ + Modes: + - hidden: Bar is not displayed + - notification: Shows a transient message (auto-dismisses) + - confirm: Shows a y/n/a confirmation prompt + """ + + DEFAULT_CSS = """ + StatusBar { + height: auto; + background: $background; + margin: 0 1; + padding: 0; + } + + StatusBar.hidden { + display: none; + } + + /* Content container */ + StatusBar .status-content { + height: 3; + padding: 1 2; + layout: horizontal; + align: left middle; + background: $background; + } + + /* Notification styles */ + StatusBar .notification-text { + width: 1fr; + height: 1; + color: $secondary; + content-align: left middle; + background: $background; + } + + StatusBar .notification-text.info { + color: $secondary; + } + + StatusBar .notification-text.warning { + color: $warning; + } + + StatusBar .notification-text.error { + color: $error; + } + + StatusBar .notification-text.success { + color: $success; + } + + /* Confirmation styles */ + StatusBar .confirm-question { + width: 1fr; + height: 1; + color: $foreground; + content-align: left middle; + background: $background; + } + + StatusBar .confirm-hints { + width: auto; + height: 1; + dock: right; + background: $background; + } + + StatusBar .hint { + width: auto; + height: 1; + margin-left: 2; + color: $secondary; + background: $background; + } + + StatusBar .hint-yes { + color: $success; + } + + StatusBar .hint-no { + color: $warning; + } + + StatusBar .hint-all { + color: $secondary; + } + """ + + # Current mode + mode: reactive[str] = reactive("hidden") + + class ConfirmResponse(Message): + """Confirmation response message.""" + + def __init__(self, result: bool | str): + self.result = result + super().__init__() + + def __init__(self, **kwargs): + """Initialize status bar.""" + super().__init__(**kwargs) + self._text = "" + self._severity = "info" + self._show_all = False + self._timer = None + + def compose(self) -> ComposeResult: + """Create empty container - content added dynamically.""" + yield 
Horizontal(classes="status-content") + + def watch_mode(self, mode: str) -> None: + """React to mode changes.""" + self.remove_class("hidden") + if mode == "hidden": + self.add_class("hidden") + self.can_focus = False + else: + self.can_focus = (mode == "confirm") + + def _rebuild_content(self) -> None: + """Rebuild the content based on current mode.""" + container = self.query_one(".status-content") + container.remove_children() + + if self.mode == "notification": + container.mount( + Static(self._text, classes=f"notification-text {self._severity}") + ) + elif self.mode == "confirm": + container.mount(Static(self._text, classes="confirm-question")) + hints = Horizontal(classes="confirm-hints") + container.mount(hints) + hints.mount(Static("\\[y]es", classes="hint hint-yes")) + hints.mount(Static("\\[n]o", classes="hint hint-no")) + if self._show_all: + hints.mount(Static("\\[a]ll", classes="hint hint-all")) + + def show_notification( + self, + text: str, + severity: str = "info", + timeout: float | None = 3.0 + ) -> None: + """Show a transient notification message. + + Args: + text: Message to display + severity: One of "info", "warning", "error", "success" + timeout: Auto-dismiss after this many seconds (None = no auto-dismiss) + """ + # Cancel any existing timer + if self._timer: + self._timer.stop() + self._timer = None + + self._text = text + self._severity = severity + self.mode = "notification" + self._rebuild_content() + + if timeout: + self._timer = self.set_timer(timeout, self.hide) + + def show_confirm(self, question: str, show_all: bool = False) -> None: + """Show a confirmation prompt. 
+ + Args: + question: Question to display + show_all: Whether to show "all" option + """ + # Cancel any existing timer + if self._timer: + self._timer.stop() + self._timer = None + + self._text = question + self._show_all = show_all + self.mode = "confirm" + self._rebuild_content() + self.focus() + + def hide(self) -> None: + """Hide the status bar.""" + if self._timer: + self._timer.stop() + self._timer = None + self.mode = "hidden" + + def on_key(self, event) -> None: + """Handle key shortcuts for confirm mode.""" + if self.mode != "confirm": + return + + key = event.key.lower() + if key == "y": + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse(True)) + self.hide() + elif key == "n": + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse(False)) + self.hide() + elif key == "a" and self._show_all: + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse("all")) + self.hide() + elif key == "escape": + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse(False)) + self.hide() diff --git a/aider/tui/worker.py b/aider/tui/worker.py new file mode 100644 index 00000000000..1ea4f25a6a7 --- /dev/null +++ b/aider/tui/worker.py @@ -0,0 +1,154 @@ +"""Worker thread for running Coder in background.""" + +import asyncio +import logging +import threading +from typing import Optional + +from aider.coders import Coder +from aider.commands import SwitchCoder + +# Suppress asyncio task destroyed warnings during shutdown +logging.getLogger('asyncio').setLevel(logging.CRITICAL) + +# Also suppress via warnings module +import warnings +warnings.filterwarnings('ignore', message='.*Task was destroyed.*') +warnings.filterwarnings('ignore', message='.*coroutine.*was never awaited.*') + + +class CoderWorker: + """Runs Coder in a background thread with its own event loop.""" + + def __init__(self, coder, output_queue, input_queue): + """Initialize worker with coder instance and 
communication queues. + + Args: + coder: The Coder instance to run + output_queue: queue.Queue for sending output to TUI + input_queue: queue.Queue for receiving input from TUI + """ + self.coder = coder + self.output_queue = output_queue # queue.Queue + self.input_queue = input_queue # queue.Queue + self.thread: Optional[threading.Thread] = None + self.loop: Optional[asyncio.AbstractEventLoop] = None + self.running = False + + def start(self): + """Start the worker thread.""" + self.running = True + self.thread = threading.Thread(target=self._run_thread, daemon=True) + self.thread.start() + + def _run_thread(self): + """Thread entry point - creates event loop and runs coder.""" + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + try: + self.loop.run_until_complete(self._async_run()) + except asyncio.CancelledError: + pass + except RuntimeError: + # Event loop stopped - this is expected during shutdown + pass + finally: + self._cleanup_loop() + + def _cleanup_loop(self): + """Clean up the event loop safely.""" + if not self.loop: + return + + try: + # Cancel pending tasks if loop is still running + if not self.loop.is_closed(): + pending = asyncio.all_tasks(self.loop) + for task in pending: + task.cancel() + + # Only try to gather if loop isn't stopped + if self.loop.is_running(): + pass # Can't do much if loop is still running + elif pending: + try: + self.loop.run_until_complete( + asyncio.gather(*pending, return_exceptions=True) + ) + except RuntimeError: + pass # Loop already stopped + + self.loop.close() + except Exception: + pass # Ignore cleanup errors + + async def _async_run(self): + """Async entry point - runs coder loop.""" + while self.running: + try: + await self.coder.run() + break # Normal exit + except asyncio.CancelledError: + break + except SwitchCoder as switch: + # Handle chat mode switches (e.g., /chat-mode architect) + try: + kwargs = dict(io=self.coder.io, from_coder=self.coder) + kwargs.update(switch.kwargs) + if 
"show_announcements" in kwargs: + del kwargs["show_announcements"] + kwargs["num_cache_warming_pings"] = 0 + kwargs["args"] = self.coder.args + # Skip summarization to avoid blocking LLM calls during mode switch + kwargs["summarize_from_coder"] = False + + # Transfer MCP state to avoid re-initialization + old_mcp_servers = self.coder.mcp_servers + old_mcp_tools = self.coder.mcp_tools + kwargs["mcp_servers"] = [] # Empty to skip initialization + self.coder = await Coder.create(**kwargs) + # Restore MCP state + self.coder.mcp_servers = old_mcp_servers + self.coder.mcp_tools = old_mcp_tools + + # Notify TUI of mode change + edit_format = getattr(self.coder, 'edit_format', 'code') or 'code' + self.output_queue.put({ + 'type': 'mode_change', + 'mode': edit_format, + }) + except Exception as e: + self.output_queue.put({ + 'type': 'error', + 'message': f"Failed to switch mode: {e}" + }) + break + # Continue the loop with the new coder + except Exception as e: + self.output_queue.put({ + 'type': 'error', + 'message': str(e) + }) + break + + def stop(self): + """Stop the worker thread gracefully.""" + self.running = False + + # Signal the coder to stop + if hasattr(self.coder, 'input_running'): + self.coder.input_running = False + if hasattr(self.coder, 'output_running'): + self.coder.output_running = False + + if self.loop and self.loop.is_running(): + try: + self.loop.call_soon_threadsafe(self.loop.stop) + except RuntimeError: + # Loop may already be closed + pass + + # Wait for thread to finish + if self.thread and self.thread.is_alive(): + self.thread.join(timeout=2.0) diff --git a/pyproject.toml b/pyproject.toml index 83089d24c3f..e305050eb84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ dev = { file = "requirements/requirements-dev.in" } help = { file = "requirements/requirements-help.in" } browser = { file = "requirements/requirements-browser.in" } playwright = { file = "requirements/requirements-playwright.in" } +tui = { file = 
"requirements/requirements-tui.in" } [tool.setuptools] include-package-data = true diff --git a/requirements/requirements-tui.in b/requirements/requirements-tui.in new file mode 100644 index 00000000000..d649decc98c --- /dev/null +++ b/requirements/requirements-tui.in @@ -0,0 +1 @@ +textual>=0.50.0 From c8092c51644a0de7a431ba4fecc698b0b132bb4e Mon Sep 17 00:00:00 2001 From: 1Broseidon Date: Tue, 9 Dec 2025 14:33:54 -0600 Subject: [PATCH 03/28] feat: tui loading title --- aider/tui/app.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/aider/tui/app.py b/aider/tui/app.py index b4470839445..f3b1c28fedf 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -85,8 +85,23 @@ def compose(self) -> ComposeResult: id="footer" ) + # ASCII banner for startup + BANNER = """\ + +[bold green] ██████╗███████╗ ██████╗██╗ ██╗ +██╔════╝██╔════╝██╔════╝██║ ██║ +██║ █████╗ ██║ ██║ ██║ +██║ ██╔══╝ ██║ ██║ ██║ +╚██████╗███████╗╚██████╗███████╗██║ + ╚═════╝╚══════╝ ╚═════╝╚══════╝╚═╝[/bold green] +""" + def on_mount(self): """Called when app starts.""" + # Show startup banner + output_container = self.query_one("#output", OutputContainer) + output_container.add_output(self.BANNER) + self.set_interval(0.05, self.check_output_queue) self.worker.start() self.query_one("#input").focus() From 041067ec0081cacd51b453ff91ec72f400a91e24 Mon Sep 17 00:00:00 2001 From: Travis Bender Date: Fri, 12 Dec 2025 07:20:25 -0700 Subject: [PATCH 04/28] fix: defer confirmation handling to resolve Windows event loop race condition --- aider/coders/base_coder.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 2bea2b0f224..8b65fba01f4 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1351,8 +1351,11 @@ async def input_task(self, preproc): try: user_message = self.io.input_task.result() + # Defer to confirmation handler to fix Windows event loop race. 
+ if self.io.confirmation_in_progress: + pass # Set user message for output task - if not self.io.acknowledge_confirmation(): + elif not self.io.acknowledge_confirmation(): if user_message: self.user_message = user_message await self.auto_save_session() From e24605c8dd4598e26ab99a263bae2af4ca0e10a1 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Fri, 12 Dec 2025 18:47:31 -0500 Subject: [PATCH 05/28] Allow messages exceeding context to be auto approved --- aider/coders/base_coder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 2bea2b0f224..2a9043a6c7f 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -2129,7 +2129,7 @@ async def check_tokens(self, messages): " the context limit is exceeded." ) - if not await self.io.confirm_ask("Try to proceed anyway?", explicit_yes_required=True): + if not await self.io.confirm_ask("Try to proceed anyway?"): return False return True From 553d5f86e1c2e0599c1b9c08bd8fdf18667164d3 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Fri, 12 Dec 2025 21:21:20 -0500 Subject: [PATCH 06/28] Respect parallel input --- aider/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/main.py b/aider/main.py index daee334f84d..52428ce97e9 100644 --- a/aider/main.py +++ b/aider/main.py @@ -686,7 +686,7 @@ def get_io(pretty): try: from aider.tui import create_tui_io - args.linear_output = True + # args.linear_output = True print("Starting aider TUI...", flush=True) io, output_queue, input_queue = create_tui_io(args, editing_mode) except ImportError as e: From d8f479fff6cf864fd20dbc44f14b7db94d88b858 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sat, 13 Dec 2025 01:33:20 -0500 Subject: [PATCH 07/28] Round 1: Styling Updates --- aider/coders/base_coder.py | 2 +- aider/io.py | 2 +- aider/main.py | 2 +- aider/tui/app.py | 138 +++++++++++++++--------------- aider/tui/io.py | 166 
++++++++++++++++++++---------------- aider/tui/styles.tcss | 26 +++--- aider/tui/widgets/footer.py | 19 +++-- aider/tui/widgets/output.py | 159 +++++++++++++++++----------------- 8 files changed, 265 insertions(+), 249 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 2a9043a6c7f..81f88346279 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -546,7 +546,7 @@ def __init__( def get_announcements(self): lines = [] - lines.append(f"Aider-CE v{__version__}") + lines.append(f"cecli v{__version__}") # Model main_model = self.main_model diff --git a/aider/io.py b/aider/io.py index 79e8cf85fbd..da04bc7679d 100644 --- a/aider/io.py +++ b/aider/io.py @@ -315,7 +315,7 @@ def __init__( user_input_color="blue", tool_output_color=None, tool_error_color="red", - tool_warning_color="#FFA500", + tool_warning_color="#ffd700", assistant_output_color="blue", completion_menu_color=None, completion_menu_bg_color=None, diff --git a/aider/main.py b/aider/main.py index 52428ce97e9..daee334f84d 100644 --- a/aider/main.py +++ b/aider/main.py @@ -686,7 +686,7 @@ def get_io(pretty): try: from aider.tui import create_tui_io - # args.linear_output = True + args.linear_output = True print("Starting aider TUI...", flush=True) io, output_queue, input_queue = create_tui_io(args, editing_mode) except ImportError as e: diff --git a/aider/tui/app.py b/aider/tui/app.py index f3b1c28fedf..5e062165ff5 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -6,28 +6,22 @@ from textual.binding import Binding from textual.theme import Theme -from .widgets import ( - AiderFooter, - CompletionBar, - InputArea, - OutputContainer, - StatusBar, -) +from .widgets import AiderFooter, CompletionBar, InputArea, OutputContainer, StatusBar from .widgets.output import CostUpdate -# Aider theme - dark with green accent +# Aider theme - dark with blue accent AIDER_THEME = Theme( name="aider", - primary="#00aa00", # Aider green + primary="#00ff5f", # Cecli blue 
secondary="#888888", - accent="#00aa00", + accent="#00ff87", foreground="#ffffff", - background="#0d0d0d", # Near black + background="rgba(0,0,0,0.1)", # Near black success="#00aa00", - warning="#ffaa00", + warning="#ffd700", error="#ff3333", - surface="#1a1a1a", # Slightly lighter than background - panel="#262626", + surface="transparent", # Slightly lighter than background + panel="transparent", dark=True, ) @@ -38,7 +32,7 @@ class AiderApp(App): CSS_PATH = "styles.tcss" BINDINGS = [ - Binding("ctrl+c", "quit", "Quit", show=True), + # Binding("ctrl+c", "quit", "Quit", show=True), Binding("ctrl+l", "clear_output", "Clear", show=True), ] @@ -60,17 +54,21 @@ def compose(self) -> ComposeResult: """Create child widgets.""" coder = self.worker.coder model_name = coder.main_model.name if coder.main_model else "Unknown" - aider_mode = getattr(coder, 'edit_format', 'code') or 'code' + aider_mode = getattr(coder, "edit_format", "code") or "code" # Get project name (just the folder name, not full path) project_name = "" if coder.repo: - project_name = coder.repo.root.name if hasattr(coder.repo.root, 'name') else str(coder.repo.root).split('/')[-1] + project_name = ( + coder.repo.root.name + if hasattr(coder.repo.root, "name") + else str(coder.repo.root).split("/")[-1] + ) else: project_name = "No repo" # Get history file path from coder's io - history_file = getattr(coder.io, 'input_history_file', None) + history_file = getattr(coder.io, "input_history_file", None) # Simple vertical layout - no header, footer has all info # Git info loaded in on_mount to avoid blocking startup @@ -82,25 +80,24 @@ def compose(self) -> ComposeResult: project_name=project_name, git_branch="", # Loaded async in on_mount aider_mode=aider_mode, - id="footer" + id="footer", ) # ASCII banner for startup - BANNER = """\ - -[bold green] ██████╗███████╗ ██████╗██╗ ██╗ -██╔════╝██╔════╝██╔════╝██║ ██║ -██║ █████╗ ██║ ██║ ██║ -██║ ██╔══╝ ██║ ██║ ██║ -╚██████╗███████╗╚██████╗███████╗██║ - ╚═════╝╚══════╝ 
╚═════╝╚══════╝╚═╝[/bold green] + BANNER = """ +[bold spring_green2] ██████╗███████╗ ██████╗██╗ ██╗[/bold spring_green2] +[bold spring_green1]██╔════╝██╔════╝██╔════╝██║ ██║[/bold spring_green1] +[bold medium_spring_green]██║ █████╗ ██║ ██║ ██║[/bold medium_spring_green] +[bold cyan2]██║ ██╔══╝ ██║ ██║ ██║[/bold cyan2] +[bold cyan1]╚██████╗███████╗╚██████╗███████╗██║[/bold cyan1] +[bold bright_white] ╚═════╝╚══════╝ ╚═════╝╚══════╝╚═╝[/bold bright_white] """ def on_mount(self): """Called when app starts.""" # Show startup banner output_container = self.query_one("#output", OutputContainer) - output_container.add_output(self.BANNER) + output_container.add_output(self.BANNER, dim=False) self.set_interval(0.05, self.check_output_queue) self.worker.start() @@ -131,41 +128,41 @@ def check_output_queue(self): def handle_output_message(self, msg): """Route output messages to appropriate handlers.""" - msg_type = msg['type'] + msg_type = msg["type"] - if msg_type == 'output': - self.add_output(msg['text'], msg.get('task_id')) - elif msg_type == 'start_response': + if msg_type == "output": + self.add_output(msg["text"], msg.get("task_id")) + elif msg_type == "start_response": # Start a new LLM response with streaming self.run_worker(self._start_response()) - elif msg_type == 'stream_chunk': + elif msg_type == "stream_chunk": # Stream a chunk of LLM response - self.run_worker(self._stream_chunk(msg['text'])) - elif msg_type == 'end_response': + self.run_worker(self._stream_chunk(msg["text"])) + elif msg_type == "end_response": # End the current LLM response self.run_worker(self._end_response()) - elif msg_type == 'start_task': - self.start_task(msg['task_id'], msg['title'], msg.get('task_type')) - elif msg_type == 'confirmation': + elif msg_type == "start_task": + self.start_task(msg["task_id"], msg["title"], msg.get("task_type")) + elif msg_type == "confirmation": self.show_confirmation(msg) - elif msg_type == 'spinner': + elif msg_type == "spinner": 
self.update_spinner(msg) - elif msg_type == 'ready_for_input': + elif msg_type == "ready_for_input": self.enable_input(msg) footer = self.query_one(AiderFooter) footer.stop_spinner() - elif msg_type == 'error': - self.show_error(msg['message']) - elif msg_type == 'cost_update': + elif msg_type == "error": + self.show_error(msg["message"]) + elif msg_type == "cost_update": footer = self.query_one(AiderFooter) - footer.update_cost(msg.get('cost', 0)) - elif msg_type == 'exit': + footer.update_cost(msg.get("cost", 0)) + elif msg_type == "exit": # Graceful exit requested - let Textual clean up terminal properly self.action_quit() - elif msg_type == 'mode_change': + elif msg_type == "mode_change": # Update footer with new chat mode footer = self.query_one(AiderFooter) - footer.update_mode(msg.get('mode', 'code')) + footer.update_mode(msg.get("mode", "code")) def add_output(self, text, task_id=None): """Add output to the output container.""" @@ -205,26 +202,26 @@ def show_confirmation(self, msg): # Show confirmation in status bar status_bar = self.query_one("#status-bar", StatusBar) - status_bar.show_confirm(msg['question'], show_all=True) + status_bar.show_confirm(msg["question"], show_all=True) def update_spinner(self, msg): """Update spinner in footer.""" footer = self.query_one(AiderFooter) - action = msg.get('action', 'start') + action = msg.get("action", "start") - if action == 'start': - footer.start_spinner(msg.get('text', '')) - elif action == 'update': - footer.spinner_text = msg.get('text', '') - elif action == 'stop': + if action == "start": + footer.start_spinner(msg.get("text", "")) + elif action == "update": + footer.spinner_text = msg.get("text", "") + elif action == "stop": footer.stop_spinner() def enable_input(self, msg): """Enable input and update autocomplete data.""" input_area = self.query_one("#input", InputArea) input_area.disabled = False # Ensure input is enabled - files = msg.get('files', []) - commands = msg.get('commands', []) + files = 
msg.get("files", []) + commands = msg.get("commands", []) input_area.update_autocomplete_data(files, commands) input_area.focus() @@ -253,7 +250,7 @@ def on_input_submitted(self, event): footer = self.query_one(AiderFooter) footer.start_spinner("Thinking...") - self.input_queue.put({'text': user_input}) + self.input_queue.put({"text": user_input}) def action_clear_output(self): """Clear all output.""" @@ -263,7 +260,7 @@ def action_clear_output(self): def action_quit(self): """Quit the application.""" # Prevent multiple quit attempts - if hasattr(self, '_quitting') and self._quitting: + if hasattr(self, "_quitting") and self._quitting: return self._quitting = True @@ -292,7 +289,7 @@ def on_status_bar_confirm_response(self, message: StatusBar.ConfirmResponse): input_area.disabled = False input_area.focus() - self.input_queue.put({'confirmed': message.result}) + self.input_queue.put({"confirmed": message.result}) # Commands that use path-based completion PATH_COMPLETION_COMMANDS = {"/read-only", "/read-only-stub", "/load", "/save"} @@ -303,9 +300,9 @@ def _extract_symbols(self) -> set[str]: # Get current files in chat inchat_files = [] - if hasattr(coder, 'abs_fnames'): + if hasattr(coder, "abs_fnames"): inchat_files.extend(coder.abs_fnames) - if hasattr(coder, 'abs_read_only_fnames'): + if hasattr(coder, "abs_read_only_fnames"): inchat_files.extend(coder.abs_read_only_fnames) # Check if cache is still valid @@ -316,9 +313,9 @@ def _extract_symbols(self) -> set[str]: symbols = set() # Also add filenames as completable symbols - if hasattr(coder, 'get_inchat_relative_files'): + if hasattr(coder, "get_inchat_relative_files"): symbols.update(coder.get_inchat_relative_files()) - if hasattr(coder, 'get_all_relative_files'): + if hasattr(coder, "get_all_relative_files"): # Add all project files too symbols.update(coder.get_all_relative_files()) @@ -336,7 +333,7 @@ def _extract_symbols(self) -> set[str]: for fname in files_to_process: try: - with open(fname, 'r', 
encoding='utf-8', errors='ignore') as f: + with open(fname, "r", encoding="utf-8", errors="ignore") as f: content = f.read() lexer = guess_lexer_for_filename(fname, content) @@ -370,7 +367,7 @@ def _get_path_completions(self, prefix: str) -> list[str]: from pathlib import Path coder = self.worker.coder - root = Path(coder.root) if hasattr(coder, 'root') else Path.cwd() + root = Path(coder.root) if hasattr(coder, "root") else Path.cwd() # Handle the prefix - could be partial path like "src/ma" or just "ma" if "/" in prefix: @@ -403,7 +400,6 @@ def _get_path_completions(self, prefix: str) -> list[str]: def _get_suggestions(self, text: str) -> list[str]: """Get completion suggestions for given text.""" - input_area = self.query_one("#input", InputArea) suggestions = [] commands = self.worker.coder.commands @@ -443,7 +439,9 @@ def _get_suggestions(self, text: str) -> list[str]: cmd_completions = commands.get_completions(cmd_name) if cmd_completions: if arg_prefix: - suggestions = [c for c in cmd_completions if arg_prefix_lower in str(c).lower()] + suggestions = [ + c for c in cmd_completions if arg_prefix_lower in str(c).lower() + ] else: suggestions = list(cmd_completions) except Exception: @@ -452,7 +450,7 @@ def _get_suggestions(self, text: str) -> list[str]: # Symbol completion triggered by @ # Find the @ and get the prefix after it at_index = text.rfind("@") - prefix = text[at_index + 1:] + prefix = text[at_index + 1 :] suggestions = self._get_symbol_completions(prefix) # No file completion for regular text - use @ for files/symbols @@ -479,9 +477,7 @@ def on_input_area_completion_requested(self, message: InputArea.CompletionReques else: # Create new completion bar completion_bar = CompletionBar( - suggestions=suggestions, - prefix=text, - id="completion-bar" + suggestions=suggestions, prefix=text, id="completion-bar" ) self.mount(completion_bar, before=input_area) else: diff --git a/aider/tui/io.py b/aider/tui/io.py index d6d605b020e..3f4a252adf2 100644 --- 
a/aider/tui/io.py +++ b/aider/tui/io.py @@ -37,13 +37,13 @@ def __init__(self, output_queue, input_queue, **kwargs): # Task detection patterns self.task_markers = [ - ('Tool:', 'tool'), - ('Running', 'execution'), - ('Git:', 'git'), - ('Linting', 'lint'), - ('Testing', 'test'), - ('Adding', 'file_op'), - ('Removing', 'file_op'), + ("Tool:", "tool"), + ("Running", "execution"), + ("Git:", "git"), + ("Linting", "lint"), + ("Testing", "test"), + ("Adding", "file_op"), + ("Removing", "file_op"), ] def _detect_task_start(self, text): @@ -58,12 +58,12 @@ def _detect_task_start(self, text): for marker, task_type in self.task_markers: if marker in text: # Extract title from first line, max 50 chars - title = text.split('\n')[0][:50] + title = text.split("\n")[0][:50] return True, title, task_type return False, None, None - def start_task(self, title, task_type='general'): + def start_task(self, title, task_type="general"): """Start a new output task. Args: @@ -71,12 +71,14 @@ def start_task(self, title, task_type='general'): task_type: Type of task """ self.current_task_id = f"task_{time.time()}" - self.output_queue.put({ - 'type': 'start_task', - 'task_id': self.current_task_id, - 'title': title, - 'task_type': task_type, - }) + self.output_queue.put( + { + "type": "start_task", + "task_id": self.current_task_id, + "title": title, + "task_type": task_type, + } + ) def _get_tui_console(self): """Get or create console for TUI rendering.""" @@ -101,16 +103,18 @@ def stream_print(self, *messages, **kwargs): text = capture.get() # Send to TUI via queue - self.output_queue.put({ - 'type': 'output', - 'text': text, - 'task_id': self.current_task_id, - }) + self.output_queue.put( + { + "type": "output", + "text": text, + "task_id": self.current_task_id, + } + ) def stream_output(self, text, final=False): """Override stream_output to send streaming text to TUI. - Uses Textual's MarkdownStream for efficient rendering. + Uses Textual's RichLog for efficient rendering. 
Args: text: Text to stream @@ -119,25 +123,27 @@ def stream_output(self, text, final=False): # Start response on first chunk if not self._streaming_response and text: self._streaming_response = True - self.output_queue.put({'type': 'start_response'}) + self.output_queue.put({"type": "start_response"}) # Stream the chunk if text: - self.output_queue.put({ - 'type': 'stream_chunk', - 'text': text, - }) + self.output_queue.put( + { + "type": "stream_chunk", + "text": text, + } + ) # End response on final chunk if final and self._streaming_response: self._streaming_response = False - self.output_queue.put({'type': 'end_response'}) + self.output_queue.put({"type": "end_response"}) def reset_streaming_response(self): """Reset streaming state between responses.""" if self._streaming_response: self._streaming_response = False - self.output_queue.put({'type': 'end_response'}) + self.output_queue.put({"type": "end_response"}) def tool_output(self, *messages, **kwargs): """Override tool_output to detect task boundaries and queue output. @@ -147,7 +153,7 @@ def tool_output(self, *messages, **kwargs): **kwargs: Additional arguments """ if messages: - text = ' '.join(str(m) for m in messages) + text = " ".join(str(m) for m in messages) # Check if this should start a new task should_start, title, task_type = self._detect_task_start(text) @@ -168,11 +174,13 @@ def start_spinner(self, text, update_last_text=True): super().start_spinner(text, update_last_text) # Send to TUI - self.output_queue.put({ - 'type': 'spinner', - 'action': 'start', - 'text': text, - }) + self.output_queue.put( + { + "type": "spinner", + "action": "start", + "text": text, + } + ) def update_spinner(self, text): """Override update_spinner to send updates to TUI. 
@@ -184,11 +192,13 @@ def update_spinner(self, text): super().update_spinner(text) # Send to TUI - self.output_queue.put({ - 'type': 'spinner', - 'action': 'update', - 'text': text, - }) + self.output_queue.put( + { + "type": "spinner", + "action": "update", + "text": text, + } + ) def stop_spinner(self): """Override stop_spinner to send stop state to TUI.""" @@ -196,10 +206,12 @@ def stop_spinner(self): super().stop_spinner() # Send to TUI - self.output_queue.put({ - 'type': 'spinner', - 'action': 'stop', - }) + self.output_queue.put( + { + "type": "spinner", + "action": "stop", + } + ) async def get_input( self, @@ -228,11 +240,13 @@ async def get_input( # Signal TUI that we're ready for input command_names = commands.get_commands() if commands else [] - self.output_queue.put({ - 'type': 'ready_for_input', - 'files': list(addable_rel_fnames) if addable_rel_fnames else [], - 'commands': command_names, - }) + self.output_queue.put( + { + "type": "ready_for_input", + "files": list(addable_rel_fnames) if addable_rel_fnames else [], + "commands": command_names, + } + ) # Wait for input from TUI (blocking in async context) # We need to poll the queue since it's not async @@ -240,10 +254,11 @@ async def get_input( try: # Non-blocking get with timeout import queue + result = self.input_queue.get(timeout=0.1) - if 'text' in result: - user_input = result['text'] + if "text" in result: + user_input = result["text"] # Log the input (same as parent) self.user_input(user_input) @@ -287,7 +302,7 @@ async def confirm_ask( return False if group and group.preference: - return group.preference == 'all' + return group.preference == "all" if group_response and group_response in self.group_responses: return self.group_responses[group_response] @@ -296,45 +311,48 @@ async def confirm_ask( self.num_user_asks += 1 # Send confirmation request to TUI - self.output_queue.put({ - 'type': 'confirmation', - 'question': question, - 'subject': subject, - 'options': { - 'default': default, - 
'explicit_yes_required': explicit_yes_required, - 'group': group, - 'group_response': group_response, - 'allow_never': allow_never, - 'allow_tweak': allow_tweak, - 'acknowledge': acknowledge, + self.output_queue.put( + { + "type": "confirmation", + "question": question, + "subject": subject, + "options": { + "default": default, + "explicit_yes_required": explicit_yes_required, + "group": group, + "group_response": group_response, + "allow_never": allow_never, + "allow_tweak": allow_tweak, + "acknowledge": acknowledge, + }, } - }) + ) # Wait for response from TUI while True: try: import queue + result = self.input_queue.get(timeout=0.1) - if 'confirmed' in result: - response = result['confirmed'] + if "confirmed" in result: + response = result["confirmed"] # Handle special responses - if response == 'never': + if response == "never": self.never_prompts.add(question_id) return False - elif response == 'tweak': - return 'tweak' - elif response == 'all': + elif response == "tweak": + return "tweak" + elif response == "all": if group: - group.preference = 'all' + group.preference = "all" if group_response: self.group_responses[group_response] = True return True - elif response == 'skip': + elif response == "skip": if group: - group.preference = 'skip' + group.preference = "skip" if group_response: self.group_responses[group_response] = False return False @@ -365,4 +383,4 @@ def request_exit(self): This sends an exit signal to the TUI instead of calling sys.exit() directly, allowing Textual to properly restore terminal state. """ - self.output_queue.put({'type': 'exit'}) + self.output_queue.put({"type": "exit"}) diff --git a/aider/tui/styles.tcss b/aider/tui/styles.tcss index b687705a56d..32efbe5cba7 100644 --- a/aider/tui/styles.tcss +++ b/aider/tui/styles.tcss @@ -5,9 +5,16 @@ * Theme defined in app.py (AIDER_THEME). 
*/ +* { + background: $background; +} + Screen { background: $background; layout: vertical; + padding: 0; + margin: 0; + border: none; } /* Output area */ @@ -24,34 +31,33 @@ Screen { /* Input area - floating card style */ #input { - height: 3; + height: 5; background: $surface; - border: none; + border: round $secondary; padding: 1 2; - margin: 0 1; + margin: 0 1 1 1; } #input:focus { - border: none; background: $surface; } -/* Footer - solid Aider green */ +/* Footer - same background as everything else */ #footer { height: 1; width: 100%; - background: $primary; - color: $background; + background: $background; + color: $accent; padding: 0 1; } -/* Generic Input styling */ -Input { +/* Generic Input and TextArea styling */ +Input, TextArea { background: $background; border: none; } -Input:focus { +Input:focus, TextArea:focus { border: none; background: $background; } diff --git a/aider/tui/widgets/footer.py b/aider/tui/widgets/footer.py index 489801bc8a6..733229161d4 100644 --- a/aider/tui/widgets/footer.py +++ b/aider/tui/widgets/footer.py @@ -30,7 +30,7 @@ def __init__( project_name: str = "", git_branch: str = "", aider_mode: str = "code", - **kwargs + **kwargs, ): """Initialize footer. 
@@ -71,39 +71,40 @@ def _get_display_model(self) -> str: def render(self) -> Text: """Render the footer with left/right split.""" + # Build left side: spinner/mode + model left = Text() if self.spinner_visible: spinner_char = self._spinner_chars[self._spinner_frame] - left.append(f"{spinner_char} ", style="bold #000000") + left.append(f"{spinner_char} ") if self.spinner_text: - left.append(self.spinner_text, style="bold #000000") + left.append(self.spinner_text) left.append(" ") if self.aider_mode: - left.append(f"[{self.aider_mode}]", style="bold #003300") + left.append(f"[{self.aider_mode}]") left.append(" ") model_display = self._get_display_model() if model_display: - left.append(model_display, style="#003300") + left.append(model_display) # Build right side: project + git + cost right = Text() if self.project_name: - right.append(self.project_name, style="#003300") + right.append(self.project_name) right.append(" ") if self.git_branch: - right.append(self.git_branch, style="bold #000000") + right.append(self.git_branch) if self.git_dirty: - right.append(f" +{self.git_dirty}", style="bold #552200") + right.append(f" +{self.git_dirty}") right.append(" ") # Always show cost - right.append(f"${self.cost:.2f}", style="#000000") + right.append(f"${self.cost:.2f}") # Calculate padding to right-align try: diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py index 25c3c41cb1b..52dafd3c302 100644 --- a/aider/tui/widgets/output.py +++ b/aider/tui/widgets/output.py @@ -1,10 +1,10 @@ -"""Output widget for Aider TUI using Textual's Markdown widget.""" +"""Output widget for Aider TUI using Textual's RichLog widget.""" import re -from textual.containers import VerticalScroll +from rich.padding import Padding from textual.message import Message -from textual.widgets import Markdown, Static +from textual.widgets import RichLog class CostUpdate(Message): @@ -15,61 +15,38 @@ def __init__(self, cost: float): super().__init__() -class 
OutputContainer(VerticalScroll): - """Scrollable output area using Markdown widgets for rich rendering. +class OutputContainer(RichLog): + """Scrollable output area using RichLog widget for rich rendering. - Uses Textual's native Markdown widget with MarkdownStream for - efficient streaming of LLM responses. + Uses Textual's RichLog widget for efficient streaming and display + of LLM responses and system messages. """ DEFAULT_CSS = """ OutputContainer { scrollbar-gutter: stable; background: $background; - } - - OutputContainer > Markdown { - margin: 0 1; - padding: 0; - background: $background; - } - - OutputContainer > .user-message { - margin: 1 1 0 1; - padding: 0; - color: $primary; - background: $background; - } - - OutputContainer > .system-message { - margin: 0 1; - padding: 0; - color: $secondary; - background: $background; + padding: 0 1; } """ + _last_write_type = None + def __init__(self, **kwargs): super().__init__(**kwargs) - self._current_markdown: Markdown | None = None - self._stream = None - self._buffer = "" + # Line buffer for streaming text to avoid word-per-line issue + self._line_buffer = "" + # Enable markup for rich formatting + self.markup = True + self.wrap = True + # self.highlight = True async def start_response(self): """Start a new LLM response section with streaming support.""" - # Stop any existing stream - await self._stop_stream() - - # Create new Markdown widget for this response - self._current_markdown = Markdown("", id=f"response-{len(self.children)}") - await self.mount(self._current_markdown) - - # Create stream for efficient updates - self._stream = Markdown.get_stream(self._current_markdown) - self._buffer = "" - + # Clear the line buffer for new response + self._line_buffer = "" # Keep scrolled to bottom - self.anchor() + self.scroll_end(animate=False) async def stream_chunk(self, text: str): """Stream a chunk of markdown text.""" @@ -79,17 +56,18 @@ async def stream_chunk(self, text: str): # Check for cost updates in the 
text self._check_cost(text) - if self._stream: - # Use MarkdownStream for efficient batched updates - await self._stream.write(text) - elif self._current_markdown: - # Fallback: append to buffer and update - self._buffer += text - await self._current_markdown.update(self._buffer) - else: - # No active response - start one - await self.start_response() - await self.stream_chunk(text) + # Add text to line buffer + self._line_buffer += text + + # Process complete lines from buffer + while "\n" in self._line_buffer: + line, self._line_buffer = self._line_buffer.split("\n", 1) + # self.write(Padding(line.strip(), (0, 0, 0, 1))) + if line.rstrip(): + self.set_last_write_type("assistant") + self.write(line.rstrip()) + # Scroll to end to show new content + self.scroll_end(animate=False) async def end_response(self): """End the current LLM response.""" @@ -97,39 +75,43 @@ async def end_response(self): async def _stop_stream(self): """Stop the current markdown stream.""" - if self._stream: - try: - await self._stream.stop() - except Exception: - pass - self._stream = None + # Flush any remaining buffer content + if self._line_buffer.strip(): + self.write(self._line_buffer) + self._line_buffer = "" + + # Scroll to end + self.scroll_end(animate=False) def add_user_message(self, text: str): """Add a user message (displayed differently from LLM output).""" - # User messages shown with > prefix, markup disabled to avoid parsing issues - static = Static(f"> {text}", classes="user-message", markup=False) - self.mount(static) + # Escape any Rich markup brackets in user text + text = self._escape_markup(text) + # User messages shown with > prefix in green color + self.set_last_write_type("user") + self.write(f"[bold medium_spring_green]> {text}[/bold medium_spring_green]") self.scroll_end(animate=False) - def add_system_message(self, text: str): + def add_system_message(self, text: str, dim=True): """Add a system/tool message.""" if not text.strip(): return - # Strip ANSI codes - 
text = re.sub(r'\x1b\[[0-9;]*m', '', text) - # Strip Rich markup tags like [blue], [/bold], etc. - text = re.sub(r'\[/?[a-zA-Z0-9_ #/]+\]', '', text) - - if not text.strip(): - return - - # Create Static with markup disabled to avoid Rich parsing issues - static = Static(text, classes="system-message", markup=False) - self.mount(static) + # Escape any Rich markup brackets + text = self._escape_markup(text).removesuffix("\n") + start = "" + end = "" + # Write system message in secondary color + if dim: + start = "[dim]" + end = "[/dim]" + text = Padding(f"{start}{text}{end}", (0, 0, 0, 2)) + + self.set_last_write_type("system") + self.write(text) self.scroll_end(animate=False) - def add_output(self, text: str, task_id: str = None): + def add_output(self, text: str, task_id: str = None, dim=True): """Add output text as a system message. This handles tool output, status messages, etc. @@ -143,7 +125,7 @@ def add_output(self, text: str, task_id: str = None): # Always treat add_output as system messages # LLM streaming goes through the dedicated stream_chunk path - self.add_system_message(text) + self.add_system_message(text, dim=dim) def _check_cost(self, text: str): """Extract and emit cost updates.""" @@ -156,13 +138,26 @@ def _check_cost(self, text: str): def start_task(self, task_id: str, title: str, task_type: str = "general"): """Start a new task section.""" - static = Static(f"\n{title}", classes="system-message", markup=False) - self.mount(static) + self.write(f"\n[bold]{title}[/bold]") self.scroll_end(animate=False) def clear_output(self): """Clear all output.""" - self._current_markdown = None - self._stream = None - self._buffer = "" - self.remove_children() + self._line_buffer = "" + self.clear() + + def _escape_markup(self, text: str) -> str: + """Escape Rich markup brackets in text. + + In Rich markup, [ and ] are special characters. To display them + literally, they must be escaped by doubling them: [ -> [[, ] -> ]]. 
+ """ + # Simple escaping: replace [ with [[ and ] with ]] + # This works for most cases, though it double-escapes already escaped brackets + return text + + def set_last_write_type(self, type): + if self._last_write_type and self._last_write_type != type: + self.write("") + + self._last_write_type = type From dafcd8bf2b8d72a72f0e7e76c1bae2219bee7b88 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 08:47:59 -0500 Subject: [PATCH 08/28] TUI Changes: - Dial in base styling - Fix cursor visibility - Braille dot spinner - Status bar update, left for dynamic, right for static content - Fix confirmation settings propagation - Confirmations should respect --yes-always flags --- aider/coders/base_coder.py | 10 +- aider/tui/app.py | 47 +++++++--- aider/tui/io.py | 160 +++++++++++++++++++------------- aider/tui/styles.tcss | 50 +++++----- aider/tui/widgets/footer.py | 23 ++--- aider/tui/widgets/input_area.py | 149 ++++++++++++++++++++++------- aider/tui/widgets/output.py | 4 +- aider/tui/widgets/status_bar.py | 87 +++++++++++++---- 8 files changed, 366 insertions(+), 164 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 81f88346279..8dda822c358 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -1250,11 +1250,17 @@ async def _run_linear(self, with_message=None, preproc=True): await self.io.recreate_input() await self.io.input_task user_message = self.io.input_task.result() - self.io.tool_output("Processing...\n") + + if self.args and not self.args.tui: + self.io.tool_output("Processing...\n") + self.io.output_task = asyncio.create_task(self.generate(user_message, preproc)) await self.io.output_task - self.io.tool_output("Finished.") + + if self.args and not self.args.tui: + self.io.tool_output("Finished.") + self.io.ring_bell() user_message = None await self.auto_save_session() diff --git a/aider/tui/app.py b/aider/tui/app.py index 5e062165ff5..87b7de66db0 100644 --- a/aider/tui/app.py +++ 
b/aider/tui/app.py @@ -12,17 +12,21 @@ # Aider theme - dark with blue accent AIDER_THEME = Theme( name="aider", - primary="#00ff5f", # Cecli blue + primary="#00ff5f", secondary="#888888", - accent="#00ff87", + accent="#00ff87", # Cecli green foreground="#ffffff", - background="rgba(0,0,0,0.1)", # Near black + background="#1e1e1e", success="#00aa00", warning="#ffd700", error="#ff3333", surface="transparent", # Slightly lighter than background panel="transparent", dark=True, + variables={ + "input-cursor-foreground": "#00ff87", + "input-cursor-text-style": "underline", + }, ) @@ -85,12 +89,13 @@ def compose(self) -> ComposeResult: # ASCII banner for startup BANNER = """ -[bold spring_green2] ██████╗███████╗ ██████╗██╗ ██╗[/bold spring_green2] -[bold spring_green1]██╔════╝██╔════╝██╔════╝██║ ██║[/bold spring_green1] -[bold medium_spring_green]██║ █████╗ ██║ ██║ ██║[/bold medium_spring_green] -[bold cyan2]██║ ██╔══╝ ██║ ██║ ██║[/bold cyan2] -[bold cyan1]╚██████╗███████╗╚██████╗███████╗██║[/bold cyan1] -[bold bright_white] ╚═════╝╚══════╝ ╚═════╝╚══════╝╚═╝[/bold bright_white] +[bold spring_green2] ██████╗███████╗ ██████╗██╗ ██╗[/bold spring_green2] +[bold spring_green1] ██╔════╝██╔════╝██╔════╝██║ ██║[/bold spring_green1] +[bold medium_spring_green] ██║ █████╗ ██║ ██║ ██║[/bold medium_spring_green] +[bold cyan2] ██║ ██╔══╝ ██║ ██║ ██║[/bold cyan2] +[bold cyan1] ╚██████╗███████╗╚██████╗███████╗██║[/bold cyan1] +[bold bright_white] ╚═════╝╚══════╝ ╚═════╝╚══════╝╚═╝[/bold bright_white] + """ def on_mount(self): @@ -200,9 +205,23 @@ def show_confirmation(self, msg): input_area = self.query_one("#input", InputArea) input_area.disabled = True - # Show confirmation in status bar + # Show confirmation in status bar with all options status_bar = self.query_one("#status-bar", StatusBar) - status_bar.show_confirm(msg["question"], show_all=True) + options = msg.get("options", {}) + + # Determine which options to show based on the parameters + show_all = options.get("group") is 
not None or options.get("group_response") is not None + allow_tweak = options.get("allow_tweak", False) + allow_never = options.get("allow_never", False) + + status_bar.show_confirm( + msg["question"], + show_all=show_all, + allow_tweak=allow_tweak, + allow_never=allow_never, + default=options.get("default", "y"), + explicit_yes_required=options.get("explicit_yes_required", False), + ) def update_spinner(self, msg): """Update spinner in footer.""" @@ -230,9 +249,9 @@ def show_error(self, message): status_bar = self.query_one("#status-bar", StatusBar) status_bar.show_notification(f"Error: {message}", severity="error", timeout=10) - def on_input_submitted(self, event): + def on_input_area_submit(self, message: InputArea.Submit): """Handle input submission.""" - user_input = event.value + user_input = message.value if not user_input.strip(): return @@ -241,7 +260,7 @@ def on_input_submitted(self, event): input_area = self.query_one("#input", InputArea) input_area.save_to_history(user_input) - event.input.value = "" + input_area.value = "" # Show user's message in output self.add_user_message(user_input) diff --git a/aider/tui/io.py b/aider/tui/io.py index 3f4a252adf2..a5beabe5124 100644 --- a/aider/tui/io.py +++ b/aider/tui/io.py @@ -296,71 +296,107 @@ async def confirm_ask( Returns: User's response (True, False, "tweak", etc.) 
""" - # Check if already answered via group or never - question_id = (question, subject) - if question_id in self.never_prompts: - return False - - if group and group.preference: - return group.preference == "all" - - if group_response and group_response in self.group_responses: - return self.group_responses[group_response] - - # Increment user ask counter self.num_user_asks += 1 - # Send confirmation request to TUI - self.output_queue.put( - { - "type": "confirmation", - "question": question, - "subject": subject, - "options": { - "default": default, - "explicit_yes_required": explicit_yes_required, - "group": group, - "group_response": group_response, - "allow_never": allow_never, - "allow_tweak": allow_tweak, - "acknowledge": acknowledge, - }, - } - ) - - # Wait for response from TUI - while True: - try: - import queue - - result = self.input_queue.get(timeout=0.1) + question_id = (question, subject) - if "confirmed" in result: - response = result["confirmed"] - - # Handle special responses - if response == "never": - self.never_prompts.add(question_id) - return False - elif response == "tweak": - return "tweak" - elif response == "all": - if group: - group.preference = "all" - if group_response: - self.group_responses[group_response] = True - return True - elif response == "skip": - if group: - group.preference = "skip" - if group_response: - self.group_responses[group_response] = False - return False - else: - # Regular boolean response - return bool(response) - except queue.Empty: - await asyncio.sleep(0.1) + try: + if question_id in self.never_prompts: + return False + + if group and not group.show_group: + group = None + if group: + allow_never = True + + valid_responses = ["yes", "no", "skip", "all"] + options = " (Y)es/(N)o" + + if allow_tweak: + valid_responses.append("tweak") + options += "/(T)weak" + if group or group_response: + if not explicit_yes_required or group_response: + options += "/(A)ll" + options += "/(S)kip all" + if allow_never: + options 
+= "/(D)on't ask again" + valid_responses.append("don't") + + if default.lower().startswith("y"): + question += options + " [Yes]: " + elif default.lower().startswith("n"): + question += options + " [No]: " + else: + question += options + f" [{default}]: " + + # Handle self.yes parameter (auto-yes for non-explicit confirmations) + if self.yes is True and not explicit_yes_required: + res = "y" + # Log the auto-response + hist = f"{question.strip()} {res}" + self.append_chat_history(hist, linebreak=True, blockquote=True) + return True + elif group and group.preference: + res = group.preference + self.user_input(f"{question} - {res}", log_only=False) + elif group_response and group_response in self.group_responses: + return self.group_responses[group_response] + else: + # Send confirmation request to TUI with full options + self.output_queue.put( + { + "type": "confirmation", + "question": question, + "subject": subject, + "options": { + "default": default, + "explicit_yes_required": explicit_yes_required, + "group": group, + "group_response": group_response, + "allow_never": allow_never, + "allow_tweak": allow_tweak, + "acknowledge": acknowledge, + "valid_responses": valid_responses, + }, + } + ) + + # Wait for response from TUI + while True: + try: + import queue + + result = self.input_queue.get(timeout=0.1) + + if "confirmed" in result: + response = result["confirmed"] + + # Handle special responses + if response == "never": + self.never_prompts.add(question_id) + return False + elif response == "tweak": + return "tweak" + elif response == "all": + if group: + group.preference = "all" + if group_response: + self.group_responses[group_response] = True + return True + elif response == "skip": + if group: + group.preference = "skip" + if group_response: + self.group_responses[group_response] = False + return False + else: + # Regular boolean response + return bool(response) + except queue.Empty: + await asyncio.sleep(0.1) + except asyncio.CancelledError: + return 
False async def stop_task_streams(self): """Override to avoid asyncio issues in worker thread. diff --git a/aider/tui/styles.tcss b/aider/tui/styles.tcss index 32efbe5cba7..a276d55a294 100644 --- a/aider/tui/styles.tcss +++ b/aider/tui/styles.tcss @@ -6,7 +6,13 @@ */ * { - background: $background; + background: $background; + scrollbar-background: $surface; + scrollbar-background-hover: $surface; + scrollbar-background-active: $surface; + scrollbar-color: $secondary; + scrollbar-color-hover: $accent; + scrollbar-color-active: $accent; } Screen { @@ -21,47 +27,49 @@ Screen { #output { height: 1fr; width: 100%; - background: $background; - padding: 0 1; + background: $surface; + padding: 0 0 0 1; margin: 0; - scrollbar-background: $background; - scrollbar-color: $surface; scrollbar-size: 1 1; } /* Input area - floating card style */ #input { - height: 5; + height: auto; + max-height: 33%; + min-height: 5; background: $surface; - border: round $secondary; - padding: 1 2; - margin: 0 1 1 1; + border: round $accent 50%; + padding: 0 0 0 2; + margin: 0 1 2 1; + scrollbar-size: 1 1; } #input:focus { background: $surface; } -/* Footer - same background as everything else */ -#footer { - height: 1; - width: 100%; - background: $background; - color: $accent; - padding: 0 1; -} - /* Generic Input and TextArea styling */ Input, TextArea { - background: $background; + background: $surface; border: none; } Input:focus, TextArea:focus { border: none; - background: $background; + background: $surface; } -Input > .input--placeholder { +Input > .input--placeholder, TextArea > .text-area--placeholder { color: $secondary; + background: $surface; +} + +/* Footer - same background as everything else */ +#footer { + height: 1; + width: 100%; + background: $surface; + color: $accent; + padding: 0 1; } diff --git a/aider/tui/widgets/footer.py b/aider/tui/widgets/footer.py index 733229161d4..287a9749f1e 100644 --- a/aider/tui/widgets/footer.py +++ b/aider/tui/widgets/footer.py @@ -22,7 
+22,7 @@ class AiderFooter(Static): spinner_text = reactive("") spinner_visible = reactive(False) _spinner_frame = 0 - _spinner_chars = "◐◓◑◒" + _spinner_chars = "⠏⠛⠹⠼⠶⠧" def __init__( self, @@ -82,29 +82,30 @@ def render(self) -> Text: left.append(self.spinner_text) left.append(" ") + # Build right side: mode + model + project + git + right = Text() + if self.aider_mode: - left.append(f"[{self.aider_mode}]") - left.append(" ") + right.append(f"{self.aider_mode}") + right.append(" • ") model_display = self._get_display_model() if model_display: - left.append(model_display) - - # Build right side: project + git + cost - right = Text() + right.append(f"{model_display}") + right.append(" • ") if self.project_name: - right.append(self.project_name) - right.append(" ") + right.append(f"{self.project_name}") + right.append(" • ") if self.git_branch: right.append(self.git_branch) if self.git_dirty: right.append(f" +{self.git_dirty}") - right.append(" ") + # right.append(" ") # Always show cost - right.append(f"${self.cost:.2f}") + # right.append(f"${self.cost:.2f}") # Calculate padding to right-align try: diff --git a/aider/tui/widgets/input_area.py b/aider/tui/widgets/input_area.py index d9b14c43056..417e30295fb 100644 --- a/aider/tui/widgets/input_area.py +++ b/aider/tui/widgets/input_area.py @@ -2,12 +2,19 @@ from prompt_toolkit.history import FileHistory from textual.message import Message -from textual.widgets import Input +from textual.widgets import TextArea -class InputArea(Input): +class InputArea(TextArea): """Input widget with autocomplete and history support.""" + class Submit(Message): + """User submitted the input (Enter key).""" + + def __init__(self, value: str): + self.value = value + super().__init__() + class CompletionRequested(Message): """User requested completion (Tab key or auto-trigger).""" @@ -17,14 +24,17 @@ def __init__(self, text: str): class CompletionCycle(Message): """User wants to cycle through completions.""" + pass class 
CompletionAccept(Message): """User wants to accept current completion.""" + pass class CompletionDismiss(Message): """User wants to dismiss completions.""" + pass def __init__(self, history_file: str = None, **kwargs): @@ -33,10 +43,19 @@ def __init__(self, history_file: str = None, **kwargs): Args: history_file: Path to input history file for up/down navigation """ - super().__init__( - placeholder="> Type your message...", - **kwargs - ) + super().__init__(show_line_numbers=False, **kwargs) + # Note: placeholder is not a constructor argument in some versions of Textual TextArea, + # but it is a reactive property. We set it here. + # Check if placeholder was passed in kwargs, if not use default + # (kwargs are passed to super, so if it WAS passed, it might be handled or ignored depending on version) + # To be safe, we set it explicitly if not in kwargs, but we can't easily check what super did. + # We'll just set it. + # But wait, kwargs might have had it. + # Let's assume kwargs might handle it or we set it. + # Actually, let's just set the default if it's empty. + if not self.placeholder: + self.placeholder = "> Type your message... (Ctrl+s to send, Enter for new line)" + self.files = [] self.commands = [] self.completion_active = False @@ -47,6 +66,43 @@ def __init__(self, history_file: str = None, **kwargs): self._history_index = -1 # -1 = not navigating, 0+ = position in history self._saved_input = "" # Saves current input when navigating history + @property + def value(self) -> str: + """Alias for text property to maintain compatibility.""" + return self.text + + @value.setter + def value(self, new_value: str): + """Alias for text property to maintain compatibility.""" + self.text = new_value + + @property + def cursor_position(self) -> int: + """ + Get cursor position as an index (compatibility wrapper). + Note: This is approximate/incomplete for multi-line but helps compat. + It returns the offset from start of text. 
+ """ + # Calculate offset based on cursor_location (row, col) + # This is expensive, but necessary for compat if used heavily. + # Or we can just ignore getters if not used. + # app.py uses `len(input_area.value)` to set it. + # So it uses setter. + return 0 # Dummy getter + + @cursor_position.setter + def cursor_position(self, pos: int): + """ + Set cursor position (compatibility wrapper). + If pos is len(text), move to end. + """ + if pos >= len(self.text): + # Move cursor to the very end + lines = self.text.split("\n") + row = max(0, len(lines) - 1) + col = len(lines[row]) + self.cursor_location = (row, col) + def _ensure_history_loaded(self) -> list[str]: """Lazily load history on first access. @@ -57,7 +113,9 @@ def _ensure_history_loaded(self) -> list[str]: if self.history_file: try: # FileHistory returns most recent first, so reverse it - self._history = list(reversed(list(FileHistory(self.history_file).load_history_strings()))) + self._history = list( + reversed(list(FileHistory(self.history_file).load_history_strings())) + ) except (OSError, IOError): pass # History file doesn't exist yet or can't be read return self._history @@ -109,15 +167,15 @@ def _history_prev(self) -> None: # Save current input when first entering history if self._history_index == -1: - self._saved_input = self.value + self._saved_input = self.text self._history_index = len(history) - 1 elif self._history_index > 0: self._history_index -= 1 else: return # Already at oldest - self.value = history[self._history_index] - self.cursor_position = len(self.value) + self.text = history[self._history_index] + self.cursor_position = len(self.text) # Will move to end def _history_next(self) -> None: """Navigate to next (newer) history entry.""" @@ -127,19 +185,44 @@ def _history_next(self) -> None: history = self._ensure_history_loaded() if self._history_index < len(history) - 1: self._history_index += 1 - self.value = history[self._history_index] + self.text = history[self._history_index] 
else: # Back to current input self._history_index = -1 - self.value = self._saved_input + self.text = self._saved_input - self.cursor_position = len(self.value) + self.cursor_position = len(self.text) # Will move to end def on_key(self, event) -> None: """Handle keys for completion and history navigation.""" if self.disabled: return + if event.key == "ctrl+c": + event.stop() + event.prevent_default() + if self.text.strip(): + self.save_to_history(self.text) + self.text = "" + return + + if event.key == "ctrl+s": + # Submit message + event.stop() + event.prevent_default() + self.post_message(self.Submit(self.text)) + return + + if event.key == "enter": + if self.completion_active: + # Accept completion + self.post_message(self.CompletionAccept()) + event.stop() + event.prevent_default() + return + else: + return + if event.key == "tab": event.stop() event.prevent_default() @@ -148,34 +231,32 @@ def on_key(self, event) -> None: self.post_message(self.CompletionCycle()) else: # Request completions - self.post_message(self.CompletionRequested(self.value)) + self.post_message(self.CompletionRequested(self.text)) elif event.key == "escape" and self.completion_active: event.stop() event.prevent_default() self.post_message(self.CompletionDismiss()) elif event.key == "up": - # Navigate to previous history entry - event.stop() - event.prevent_default() - self._history_prev() + # If on first line, navigate history + # Or use Ctrl+Up? Let's use Up if on first line for convenience, similar to typical shell + # BUT this is a text editor. + # Let's try: if cursor is at (0,0) or just row 0. 
+ if self.cursor_location[0] == 0: + event.stop() + event.prevent_default() + self._history_prev() elif event.key == "down": - # Navigate to next history entry - event.stop() - event.prevent_default() - self._history_next() + # If on last line, navigate history + if self.cursor_location[0] == self.document.line_count - 1: + event.stop() + event.prevent_default() + self._history_next() - def on_input_changed(self, event) -> None: + def on_text_area_changed(self, event) -> None: """Update completions as user types.""" + # Note: Event name for TextArea change is 'Changed' but handler is on_text_area_changed if not self.disabled: + val = self.text # Auto-trigger for slash commands, @ symbols, or update existing completions - if event.value.startswith("/") or "@" in event.value or self.completion_active: - self.post_message(self.CompletionRequested(event.value)) - - def on_input_submitted(self, event) -> None: - """Handle Enter key - accept completion if active.""" - if self.completion_active: - # Let app handle accepting completion - self.post_message(self.CompletionAccept()) - # Prevent the default submit behavior - event.stop() - event.prevent_default() + if val.startswith("/") or "@" in val or self.completion_active: + self.post_message(self.CompletionRequested(val)) diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py index 52dafd3c302..d9c928bfeba 100644 --- a/aider/tui/widgets/output.py +++ b/aider/tui/widgets/output.py @@ -25,8 +25,8 @@ class OutputContainer(RichLog): DEFAULT_CSS = """ OutputContainer { scrollbar-gutter: stable; - background: $background; - padding: 0 1; + background: $surface; + padding: 0 0; } """ diff --git a/aider/tui/widgets/status_bar.py b/aider/tui/widgets/status_bar.py index 9b8d64ea58f..80e99ac47b8 100644 --- a/aider/tui/widgets/status_bar.py +++ b/aider/tui/widgets/status_bar.py @@ -20,8 +20,8 @@ class StatusBar(Widget, can_focus=True): DEFAULT_CSS = """ StatusBar { height: auto; - background: $background; - margin: 0 
1; + background: $surface; + margin: 0 0; padding: 0; } @@ -32,10 +32,10 @@ class StatusBar(Widget, can_focus=True): /* Content container */ StatusBar .status-content { height: 3; - padding: 1 2; + padding: 1 0 0 2; layout: horizontal; align: left middle; - background: $background; + background: $surface; } /* Notification styles */ @@ -44,7 +44,7 @@ class StatusBar(Widget, can_focus=True): height: 1; color: $secondary; content-align: left middle; - background: $background; + background: $surface; } StatusBar .notification-text.info { @@ -69,14 +69,14 @@ class StatusBar(Widget, can_focus=True): height: 1; color: $foreground; content-align: left middle; - background: $background; + background: $surface; } StatusBar .confirm-hints { width: auto; height: 1; dock: right; - background: $background; + background: $surface; } StatusBar .hint { @@ -84,7 +84,7 @@ class StatusBar(Widget, can_focus=True): height: 1; margin-left: 2; color: $secondary; - background: $background; + background: $surface; } StatusBar .hint-yes { @@ -98,6 +98,18 @@ class StatusBar(Widget, can_focus=True): StatusBar .hint-all { color: $secondary; } + + StatusBar .hint-skip { + color: $secondary; + } + + StatusBar .hint-tweak { + color: $accent; + } + + StatusBar .hint-never { + color: $warning; + } """ # Current mode @@ -116,6 +128,10 @@ def __init__(self, **kwargs): self._text = "" self._severity = "info" self._show_all = False + self._allow_tweak = False + self._allow_never = False + self._default = "y" + self._explicit_yes_required = False self._timer = None def compose(self) -> ComposeResult: @@ -129,7 +145,7 @@ def watch_mode(self, mode: str) -> None: self.add_class("hidden") self.can_focus = False else: - self.can_focus = (mode == "confirm") + self.can_focus = mode == "confirm" def _rebuild_content(self) -> None: """Rebuild the content based on current mode.""" @@ -137,9 +153,7 @@ def _rebuild_content(self) -> None: container.remove_children() if self.mode == "notification": - container.mount( 
- Static(self._text, classes=f"notification-text {self._severity}") - ) + container.mount(Static(self._text, classes=f"notification-text {self._severity}")) elif self.mode == "confirm": container.mount(Static(self._text, classes="confirm-question")) hints = Horizontal(classes="confirm-hints") @@ -148,12 +162,14 @@ def _rebuild_content(self) -> None: hints.mount(Static("\\[n]o", classes="hint hint-no")) if self._show_all: hints.mount(Static("\\[a]ll", classes="hint hint-all")) + hints.mount(Static("\\[s]kip", classes="hint hint-skip")) + if self._allow_tweak: + hints.mount(Static("\\[t]weak", classes="hint hint-tweak")) + if self._allow_never: + hints.mount(Static("\\[d]on't ask", classes="hint hint-never")) def show_notification( - self, - text: str, - severity: str = "info", - timeout: float | None = 3.0 + self, text: str, severity: str = "info", timeout: float | None = 3.0 ) -> None: """Show a transient notification message. @@ -175,12 +191,24 @@ def show_notification( if timeout: self._timer = self.set_timer(timeout, self.hide) - def show_confirm(self, question: str, show_all: bool = False) -> None: + def show_confirm( + self, + question: str, + show_all: bool = False, + allow_tweak: bool = False, + allow_never: bool = False, + default: str = "y", + explicit_yes_required: bool = False, + ) -> None: """Show a confirmation prompt. 
Args: question: Question to display show_all: Whether to show "all" option + allow_tweak: Whether to show "tweak" option + allow_never: Whether to show "don't ask again" option + default: Default response ("y" or "n") + explicit_yes_required: Whether explicit yes is required """ # Cancel any existing timer if self._timer: @@ -189,6 +217,10 @@ def show_confirm(self, question: str, show_all: bool = False) -> None: self._text = question self._show_all = show_all + self._allow_tweak = allow_tweak + self._allow_never = allow_never + self._default = default + self._explicit_yes_required = explicit_yes_required self.mode = "confirm" self._rebuild_content() self.focus() @@ -221,8 +253,27 @@ def on_key(self, event) -> None: event.prevent_default() self.post_message(self.ConfirmResponse("all")) self.hide() + elif key == "s" and self._show_all: + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse("skip")) + self.hide() + elif key == "t" and self._allow_tweak: + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse("tweak")) + self.hide() + elif key == "d" and self._allow_never: + event.stop() + event.prevent_default() + self.post_message(self.ConfirmResponse("never")) + self.hide() elif key == "escape": event.stop() event.prevent_default() - self.post_message(self.ConfirmResponse(False)) + # Return default based on default parameter + if self._default.lower().startswith("y"): + self.post_message(self.ConfirmResponse(True)) + else: + self.post_message(self.ConfirmResponse(False)) self.hide() From 9c3742c4ffb6cb38920935d054d2bf2435e92450 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 08:48:19 -0500 Subject: [PATCH 09/28] Update agent mode system prompts --- aider/coders/agent_prompts.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/aider/coders/agent_prompts.py b/aider/coders/agent_prompts.py index 68dd74f7c6f..7d0bf390d48 100644 --- a/aider/coders/agent_prompts.py 
+++ b/aider/coders/agent_prompts.py @@ -17,14 +17,14 @@ class AgentPrompts(CoderPrompts): ## Core Directives - **Role**: Act as an expert software engineer. - **Act Proactively**: Autonomously use file discovery and context management tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `View`, `Remove`) to gather information and fulfill the user's request. Chain tool calls across multiple turns to continue exploration. -- **Be Decisive**: Do not ask the same question or search for the same term in multiple ways. Trust your initial valid findings. +- **Be Decisive**: Do not ask the same question or search for the same term in multiple ways. Trust that your initial findings are valid. - **Be Concise**: Keep all responses brief and direct (1-3 sentences). Avoid preamble, postamble, and unnecessary explanations. -- **Confirm Ambiguity**: Before applying complex or ambiguous edits, briefly state your plan and ask for confirmation. For simple, direct edits, proceed without confirmation. +- **Confirm Ambiguity**: Before applying complex or ambiguous edits, briefly state your plan. For simple, direct edits, proceed without confirmation. ## Core Workflow -1. **Plan**: Determine the necessary changes. Use the `UpdateTodoList` tool to manage your plan. Always begin by the todo list. +1. **Plan**: Determine the necessary changes. Use the `UpdateTodoList` tool to manage your plan. Always begin by updating the todo list. 2. **Explore**: Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `Grep`) to find relevant files. These tools add files to context as read-only. Use `Grep` first for broad searches to avoid context clutter. Concisely describe your search strategy with the `Thinking` tool. 3. **Think**: Given the contents of your exploration, concisely reason through the edits with the `Thinking` tool that need to be made to accomplish the goal. For complex edits, briefly outline your plan for the user. 4. **Execute**: Use the appropriate editing tool. 
Remember to use `MakeEditable` on a file before modifying it. Break large edits (those greater than ~100 lines) into multiple smaller steps. Proactively use skills if they are available @@ -36,10 +36,7 @@ class AgentPrompts(CoderPrompts): - **Plan Steps**: Create a todo list at the start of complex tasks to track your progress through multiple exploration rounds. - **Stay Organized**: Update the todo list as you complete steps every 3-10 tool calls to maintain context across multiple tool calls. -## Code Editing Hierarchy -Your primary method for all modifications is through granular tool calls. Use SEARCH/REPLACE only as a last resort. - -### 1. Granular Tools (Always Preferred) +### Editing Tools Use these for precision and safety. - **Text/Block Manipulation**: `ReplaceText` (Preferred for the majority of edits), `InsertBlock`, `DeleteBlock`, `ReplaceAll` (use with `dry_run=True` for safety). - **Line-Based Edits**: `ReplaceLine(s)`, `DeleteLine(s)`, `IndentLines`. @@ -78,9 +75,10 @@ class AgentPrompts(CoderPrompts): system_reminder = """ ## Reminders +- Stay on task. Do not pursue goals the user did not ask for. - Any tool call automatically continues to the next turn. Provide no tool calls in your final answer. - Use context blocks (directory structure, git status) to orient yourself. -- Remove files you are done with viewing/editing from the context with the `Remove` tool. It is fine to re-add them later +- Remove files from the context when you are done with viewing/editing with the `Remove` tool. 
It is fine to re-add them later, if they are needed again - Remove skills if they are not helpful for your current task with `RemoveSkill` {lazy_prompt} From 8ec5c54daeb7e281587e50891d9291ae37b916c3 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 08:54:29 -0500 Subject: [PATCH 10/28] FIle formatting --- aider/tui/widgets/completion_bar.py | 17 ++++++++----- aider/tui/worker.py | 38 ++++++++++++++--------------- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/aider/tui/widgets/completion_bar.py b/aider/tui/widgets/completion_bar.py index cfc5ecd7fdd..7bcfc70c73c 100644 --- a/aider/tui/widgets/completion_bar.py +++ b/aider/tui/widgets/completion_bar.py @@ -68,6 +68,7 @@ def __init__(self, value: str): class Dismissed(Message): """Completion bar dismissed.""" + pass def __init__(self, suggestions: list[str] = None, prefix: str = "", **kwargs): @@ -78,7 +79,7 @@ def __init__(self, suggestions: list[str] = None, prefix: str = "", **kwargs): prefix: Current input prefix to complete from """ super().__init__(**kwargs) - self.suggestions = (suggestions or [])[:self.MAX_SUGGESTIONS] + self.suggestions = (suggestions or [])[: self.MAX_SUGGESTIONS] self.prefix = prefix self.selected_index = 0 self._item_widgets: list[Static] = [] @@ -121,7 +122,7 @@ def _compute_display_names(self) -> None: # Use the directory part of common prefix self._common_prefix = common.rsplit("/", 1)[0] + "/" if "/" in common else "" if self._common_prefix: - self._display_names = [s[len(self._common_prefix):] for s in self.suggestions] + self._display_names = [s[len(self._common_prefix) :] for s in self.suggestions] else: self._display_names = self.suggestions[:] else: @@ -162,7 +163,7 @@ def compose(self) -> ComposeResult: def update_suggestions(self, suggestions: list[str], prefix: str = "") -> None: """Update suggestions in place.""" - self.suggestions = suggestions[:self.MAX_SUGGESTIONS] + self.suggestions = suggestions[: self.MAX_SUGGESTIONS] self.prefix = 
prefix self.selected_index = 0 @@ -182,7 +183,9 @@ def _refresh_items(self) -> None: # Ensure meta widgets exist if self._left_more is None or self._left_more.parent is None: self._left_more = Static("", classes="completion-more") - self.mount(self._left_more, before=self._item_widgets[0] if self._item_widgets else None) + self.mount( + self._left_more, before=self._item_widgets[0] if self._item_widgets else None + ) if self._right_more is None or self._right_more.parent is None: self._right_more = Static("", classes="completion-more") self.mount(self._right_more, after=self._left_more if self._left_more else None) @@ -194,7 +197,9 @@ def _refresh_items(self) -> None: while len(self._item_widgets) < self.WINDOW_SIZE: new_item = Static("", classes="completion-item") self._item_widgets.append(new_item) - target = self._right_more if self._right_more and self._right_more.parent else self._hint + target = ( + self._right_more if self._right_more and self._right_more.parent else self._hint + ) self.mount(new_item, before=target) if not self._display_names: @@ -209,7 +214,7 @@ def _refresh_items(self) -> None: # Build display order: selected item first, then others after it total = len(self._display_names) items_before = self.selected_index - items_after = total - self.selected_index - 1 + # items_after = total - self.selected_index - 1 # Show indicator if there are items before selected if self._left_more: diff --git a/aider/tui/worker.py b/aider/tui/worker.py index 1ea4f25a6a7..4d0e9cacda0 100644 --- a/aider/tui/worker.py +++ b/aider/tui/worker.py @@ -3,18 +3,18 @@ import asyncio import logging import threading +import warnings from typing import Optional from aider.coders import Coder from aider.commands import SwitchCoder # Suppress asyncio task destroyed warnings during shutdown -logging.getLogger('asyncio').setLevel(logging.CRITICAL) +logging.getLogger("asyncio").setLevel(logging.CRITICAL) # Also suppress via warnings module -import warnings 
-warnings.filterwarnings('ignore', message='.*Task was destroyed.*') -warnings.filterwarnings('ignore', message='.*coroutine.*was never awaited.*') +warnings.filterwarnings("ignore", message=".*Task was destroyed.*") +warnings.filterwarnings("ignore", message=".*coroutine.*was never awaited.*") class CoderWorker: @@ -30,7 +30,7 @@ def __init__(self, coder, output_queue, input_queue): """ self.coder = coder self.output_queue = output_queue # queue.Queue - self.input_queue = input_queue # queue.Queue + self.input_queue = input_queue # queue.Queue self.thread: Optional[threading.Thread] = None self.loop: Optional[asyncio.AbstractEventLoop] = None self.running = False @@ -113,23 +113,21 @@ async def _async_run(self): self.coder.mcp_tools = old_mcp_tools # Notify TUI of mode change - edit_format = getattr(self.coder, 'edit_format', 'code') or 'code' - self.output_queue.put({ - 'type': 'mode_change', - 'mode': edit_format, - }) + edit_format = getattr(self.coder, "edit_format", "code") or "code" + self.output_queue.put( + { + "type": "mode_change", + "mode": edit_format, + } + ) except Exception as e: - self.output_queue.put({ - 'type': 'error', - 'message': f"Failed to switch mode: {e}" - }) + self.output_queue.put( + {"type": "error", "message": f"Failed to switch mode: {e}"} + ) break # Continue the loop with the new coder except Exception as e: - self.output_queue.put({ - 'type': 'error', - 'message': str(e) - }) + self.output_queue.put({"type": "error", "message": str(e)}) break def stop(self): @@ -137,9 +135,9 @@ def stop(self): self.running = False # Signal the coder to stop - if hasattr(self.coder, 'input_running'): + if hasattr(self.coder, "input_running"): self.coder.input_running = False - if hasattr(self.coder, 'output_running'): + if hasattr(self.coder, "output_running"): self.coder.output_running = False if self.loop and self.loop.is_running(): From f3239ee557cebdd422cd09069604e38de2805cd4 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 
2025 11:05:49 -0500 Subject: [PATCH 11/28] Add print output capturing --- aider/io.py | 7 +++--- aider/tui/__init__.py | 6 ++--- aider/tui/app.py | 46 ++++++++++++++++++------------------- aider/tui/io.py | 9 ++++++++ aider/tui/widgets/output.py | 41 ++++++++++++++++++++++----------- 5 files changed, 67 insertions(+), 42 deletions(-) diff --git a/aider/io.py b/aider/io.py index da04bc7679d..8a67cdd9d2e 100644 --- a/aider/io.py +++ b/aider/io.py @@ -343,7 +343,6 @@ def __init__( self.chat_history_file = None self.placeholder = None - self.fallback_spinner = None self.prompt_session = None self.interrupted = False self.never_prompts = set() @@ -459,6 +458,8 @@ def __init__( self.spinner_frame_index = 0 self.spinner_last_frame_index = 0 self.unicode_palette = "░█" + self.fallback_spinner = None + self.fallback_spinner_enabled = True if fancy_input: # If unicode is supported, use the rich 'dots2' spinner, otherwise an ascii fallback @@ -527,7 +528,7 @@ def start_spinner(self, text, update_last_text=True): if update_last_text: self.last_spinner_text = text - else: + elif self.fallback_spinner_enabled: self.fallback_spinner = Spinner(text) self.fallback_spinner.step() @@ -548,7 +549,7 @@ def stop_spinner(self): # Keep last frame index to avoid spinner "jumping" on restart self.spinner_last_frame_index = self.spinner_frame_index - if self.fallback_spinner: + if self.fallback_spinner and self.fallback_spinner_enabled: self.fallback_spinner.end() self.fallback_spinner = None diff --git a/aider/tui/__init__.py b/aider/tui/__init__.py index 286c775909c..05b56a30c04 100644 --- a/aider/tui/__init__.py +++ b/aider/tui/__init__.py @@ -6,11 +6,11 @@ import queue -from .app import AiderApp +from .app import TUI from .io import TextualInputOutput from .worker import CoderWorker -__all__ = ["AiderApp", "TextualInputOutput", "CoderWorker", "create_tui_io", "launch_tui"] +__all__ = ["TUI", "TextualInputOutput", "CoderWorker", "create_tui_io", "launch_tui"] def create_tui_io(args, 
editing_mode): @@ -72,7 +72,7 @@ async def launch_tui(coder, output_queue, input_queue): Exit code from TUI """ worker = CoderWorker(coder, output_queue, input_queue) - app = AiderApp(worker, output_queue, input_queue) + app = TUI(worker, output_queue, input_queue) return_code = await app.run_async() diff --git a/aider/tui/app.py b/aider/tui/app.py index 87b7de66db0..3e4345a6fe0 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -9,28 +9,8 @@ from .widgets import AiderFooter, CompletionBar, InputArea, OutputContainer, StatusBar from .widgets.output import CostUpdate -# Aider theme - dark with blue accent -AIDER_THEME = Theme( - name="aider", - primary="#00ff5f", - secondary="#888888", - accent="#00ff87", # Cecli green - foreground="#ffffff", - background="#1e1e1e", - success="#00aa00", - warning="#ffd700", - error="#ff3333", - surface="transparent", # Slightly lighter than background - panel="transparent", - dark=True, - variables={ - "input-cursor-foreground": "#00ff87", - "input-cursor-text-style": "underline", - }, -) - - -class AiderApp(App): + +class TUI(App): """Main Textual application for Aider TUI.""" CSS_PATH = "styles.tcss" @@ -51,7 +31,26 @@ def __init__(self, coder_worker, output_queue, input_queue): self._symbols_files_hash = None # Register and set aider theme - self.register_theme(AIDER_THEME) + BASE_THEME = Theme( + name="aider", + primary="#00ff5f", + secondary="#888888", + accent="#00ff87", # Cecli green + foreground="#ffffff", + background="#1e1e1e", + success="#00aa00", + warning="#ffd700", + error="#ff3333", + surface="transparent", # Slightly lighter than background + panel="transparent", + dark=True, + variables={ + "input-cursor-foreground": "#00ff87", + "input-cursor-text-style": "underline", + }, + ) + + self.register_theme(BASE_THEME) self.theme = "aider" def compose(self) -> ComposeResult: @@ -103,6 +102,7 @@ def on_mount(self): # Show startup banner output_container = self.query_one("#output", OutputContainer) 
output_container.add_output(self.BANNER, dim=False) + self.begin_capture_print(output_container, stdout=True, stderr=True) self.set_interval(0.05, self.check_output_queue) self.worker.start() diff --git a/aider/tui/io.py b/aider/tui/io.py index a5beabe5124..7ad9b90aa46 100644 --- a/aider/tui/io.py +++ b/aider/tui/io.py @@ -35,6 +35,9 @@ def __init__(self, output_queue, input_queue, **kwargs): # LLM response streaming state self._streaming_response = False + # Disable fallback spinner so it doesn't clutter terminal output + self.fallback_spinner_enabled = False + # Task detection patterns self.task_markers = [ ("Tool:", "tool"), @@ -46,6 +49,12 @@ def __init__(self, output_queue, input_queue, **kwargs): ("Removing", "file_op"), ] + def rule(self): + pass + + def get_bottom_toolbar(self): + pass + def _detect_task_start(self, text): """Detect if this output should start a new task. diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py index d9c928bfeba..39360377efa 100644 --- a/aider/tui/widgets/output.py +++ b/aider/tui/widgets/output.py @@ -3,6 +3,8 @@ import re from rich.padding import Padding +from rich.style import Style as RichStyle +from textual import events, on from textual.message import Message from textual.widgets import RichLog @@ -85,8 +87,6 @@ async def _stop_stream(self): def add_user_message(self, text: str): """Add a user message (displayed differently from LLM output).""" - # Escape any Rich markup brackets in user text - text = self._escape_markup(text) # User messages shown with > prefix in green color self.set_last_write_type("user") self.write(f"[bold medium_spring_green]> {text}[/bold medium_spring_green]") @@ -98,7 +98,7 @@ def add_system_message(self, text: str, dim=True): return # Escape any Rich markup brackets - text = self._escape_markup(text).removesuffix("\n") + text = text.removesuffix("\n") start = "" end = "" # Write system message in secondary color @@ -127,6 +127,13 @@ def add_output(self, text: str, task_id: str 
= None, dim=True): # LLM streaming goes through the dedicated stream_chunk path self.add_system_message(text, dim=dim) + def add_output_styled(self, text: str, styles=None): + if not styles: + styles = dict() + + styles = RichStyle(**styles) + self.write(Padding(styles.render(text=text), (0, 0, 0, 2))) + def _check_cost(self, text: str): """Extract and emit cost updates.""" match = re.search(r"\$(\d+\.?\d*)\s*session", text) @@ -146,18 +153,26 @@ def clear_output(self): self._line_buffer = "" self.clear() - def _escape_markup(self, text: str) -> str: - """Escape Rich markup brackets in text. - - In Rich markup, [ and ] are special characters. To display them - literally, they must be escaped by doubling them: [ -> [[, ] -> ]]. - """ - # Simple escaping: replace [ with [[ and ] with ]] - # This works for most cases, though it double-escapes already escaped brackets - return text - def set_last_write_type(self, type): if self._last_write_type and self._last_write_type != type: self.write("") self._last_write_type = type + + @on(events.Print) + def log_print(self, event: events.Print) -> None: + """Writes the captured print output to the RichLog widget.""" + if event.text.strip(): + theme_vars = self.app.get_css_variables() + color = theme_vars.get("warning") + write_type = "stdout" + + if event.stderr: + color = theme_vars.get("error") + write_type = "stderr" + + self.set_last_write_type(write_type) + self.add_output_styled(event.text, {"color": color}) + + # Prevent the event from bubbling further + event.prevent_default() From 07e8152b03efbb3517cdfefb64f7af95e706583a Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 12:26:48 -0500 Subject: [PATCH 12/28] Add spinner suffixes back to TUI mode, friendlier scrolling during output generation --- aider/tui/app.py | 2 ++ aider/tui/io.py | 18 ++++++++++++++++++ aider/tui/widgets/footer.py | 6 +++++- aider/tui/widgets/output.py | 37 +++++++++++++++++++++++++++---------- 4 files changed, 52 
insertions(+), 11 deletions(-) diff --git a/aider/tui/app.py b/aider/tui/app.py index 3e4345a6fe0..6e5ff1db277 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -232,6 +232,8 @@ def update_spinner(self, msg): footer.start_spinner(msg.get("text", "")) elif action == "update": footer.spinner_text = msg.get("text", "") + elif action == "update_suffix": + footer.spinner_suffix = msg.get("text", "") elif action == "stop": footer.stop_spinner() diff --git a/aider/tui/io.py b/aider/tui/io.py index 7ad9b90aa46..4579ccd203d 100644 --- a/aider/tui/io.py +++ b/aider/tui/io.py @@ -209,6 +209,24 @@ def update_spinner(self, text): } ) + def update_spinner_suffix(self, text=None): + """Override update_spinner_suffix to send updates to TUI. + + Args: + text: New spinner suffix text + """ + # Call parent + super().update_spinner_suffix(text) + + # Send to TUI + self.output_queue.put( + { + "type": "spinner", + "action": "update_suffix", + "text": text, + } + ) + def stop_spinner(self): """Override stop_spinner to send stop state to TUI.""" # Call parent diff --git a/aider/tui/widgets/footer.py b/aider/tui/widgets/footer.py index 287a9749f1e..632956973bf 100644 --- a/aider/tui/widgets/footer.py +++ b/aider/tui/widgets/footer.py @@ -20,6 +20,7 @@ class AiderFooter(Static): # Spinner state spinner_text = reactive("") + spinner_suffix = reactive("") spinner_visible = reactive(False) _spinner_frame = 0 _spinner_chars = "⠏⠛⠹⠼⠶⠧" @@ -80,7 +81,10 @@ def render(self) -> Text: left.append(f"{spinner_char} ") if self.spinner_text: left.append(self.spinner_text) - left.append(" ") + + if self.spinner_suffix: + left.append(" • ") + left.append(self.spinner_suffix) # Build right side: mode + model + project + git right = Text() diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py index 39360377efa..730a838e379 100644 --- a/aider/tui/widgets/output.py +++ b/aider/tui/widgets/output.py @@ -38,17 +38,16 @@ def __init__(self, **kwargs): super().__init__(**kwargs) # Line 
buffer for streaming text to avoid word-per-line issue self._line_buffer = "" + # Enable markup for rich formatting + self.highlight = True self.markup = True self.wrap = True - # self.highlight = True async def start_response(self): """Start a new LLM response section with streaming support.""" # Clear the line buffer for new response self._line_buffer = "" - # Keep scrolled to bottom - self.scroll_end(animate=False) async def stream_chunk(self, text: str): """Stream a chunk of markdown text.""" @@ -68,8 +67,6 @@ async def stream_chunk(self, text: str): if line.rstrip(): self.set_last_write_type("assistant") self.write(line.rstrip()) - # Scroll to end to show new content - self.scroll_end(animate=False) async def end_response(self): """End the current LLM response.""" @@ -82,12 +79,10 @@ async def _stop_stream(self): self.write(self._line_buffer) self._line_buffer = "" - # Scroll to end - self.scroll_end(animate=False) - def add_user_message(self, text: str): """Add a user message (displayed differently from LLM output).""" # User messages shown with > prefix in green color + self.auto_scroll = True self.set_last_write_type("user") self.write(f"[bold medium_spring_green]> {text}[/bold medium_spring_green]") self.scroll_end(animate=False) @@ -109,7 +104,6 @@ def add_system_message(self, text: str, dim=True): self.set_last_write_type("system") self.write(text) - self.scroll_end(animate=False) def add_output(self, text: str, task_id: str = None, dim=True): """Add output text as a system message. 
@@ -146,7 +140,6 @@ def _check_cost(self, text: str): def start_task(self, task_id: str, title: str, task_type: str = "general"): """Start a new task section.""" self.write(f"\n[bold]{title}[/bold]") - self.scroll_end(animate=False) def clear_output(self): """Clear all output.""" @@ -176,3 +169,27 @@ def log_print(self, event: events.Print) -> None: # Prevent the event from bubbling further event.prevent_default() + + @on(events.MouseScrollUp) + def disable_auto_scroll(self, event: events.MouseScrollUp) -> None: + """ + Event handler called when the screen is scrolled up. + Disables automatic scrolling + """ + self.auto_scroll = False + + @on(events.MouseScrollDown) + def enable_auto_scroll(self, event: events.MouseScrollDown) -> None: + """ + Event handler called when the screen is scrolled down. + Enables automatic scrolling if we are near the end + """ + + # Calculate the relevant dimensions + scroll_top = self.scroll_y + view_height = self.size.height + content_height = self.content_size.height + + # Check if scrolled to the bottom (allowing for minor floating point inaccuracies) + if scroll_top + view_height >= content_height - 32: + self.auto_scroll = True From f7fa82d5027f2068ec80177e8912db1164c2f6ac Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 17:40:08 -0500 Subject: [PATCH 13/28] TUI Updates: - Run Command print handling - Background jobs like fzf to run correctly with run_obstructive() - Propagate output type for new lines - Prevent triple new lines from being written for the aesthetic --- aider/coders/base_coder.py | 3 ++ aider/commands.py | 28 ++++++++----- aider/io.py | 2 +- aider/run_cmd.py | 26 ++++++------ aider/tools/command.py | 10 +++++ aider/tools/command_interactive.py | 7 ++++ aider/tools/utils/output.py | 5 ++- aider/tui/__init__.py | 4 ++ aider/tui/app.py | 16 ++++++++ aider/tui/io.py | 14 +++++++ aider/tui/widgets/output.py | 64 +++++++++++++++++++++++++----- aider/tui/worker.py | 15 +++---- aider/utils.py | 25 
+++++++++++- 13 files changed, 176 insertions(+), 43 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 8dda822c358..9f99e602158 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -152,6 +152,9 @@ class Coder: ok_to_warm_cache = False + # Weak reference to TUI app instance (when running in TUI mode) + tui = None + @classmethod async def create( self, diff --git a/aider/commands.py b/aider/commands.py index 1db34ec87a2..bb4bea285cc 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -920,7 +920,7 @@ async def cmd_add(self, args): if not addable_files: self.io.tool_output("No files available to add.") return - selected_files = run_fzf(addable_files, multi=True) + selected_files = run_fzf(addable_files, multi=True, coder=self.coder) if not selected_files: return args = " ".join([self.quote_fname(f) for f in selected_files]) @@ -1254,19 +1254,29 @@ async def cmd_run(self, args, add_on_nonzero_exit=False): "Run a shell command and optionally add the output to the chat (alias: !)" try: self.cmd_running = True + should_print = True + + if self.coder.args.tui: + should_print = False + exit_status, combined_output = await asyncio.to_thread( run_cmd, args, verbose=self.verbose, - error_print=self.io.tool_error, + error_print=self.coder.io.tool_error, cwd=self.coder.root, + should_print=should_print, ) + self.cmd_running = False - # This print statement, for whatever reason, - # allows the thread to properly yield control of the terminal - # to the main program - print("") + if self.coder.args.tui: + print(combined_output) + else: + # This print statement, for whatever reason, + # allows the thread to properly yield control of the terminal + # to the main program + print("") if combined_output is None: return @@ -1834,7 +1844,7 @@ def cmd_read_only(self, args): target_mode="read-only", ) return - selected_files = run_fzf(addable_files, multi=True) + selected_files = run_fzf(addable_files, multi=True, 
coder=self.coder) if not selected_files: # If user didn't select any files, convert all editable files to read-only self._cmd_read_only_base( @@ -1872,7 +1882,7 @@ def cmd_read_only_stub(self, args): target_mode="read-only (stub)", ) return - selected_files = run_fzf(addable_files, multi=True) + selected_files = run_fzf(addable_files, multi=True, coder=self.coder) if not selected_files: # If user didn't select any files, convert all editable files to read-only stubs self._cmd_read_only_base( @@ -2066,7 +2076,7 @@ def cmd_edit(self, args=""): def cmd_history_search(self, args): "Fuzzy search in history and paste it in the prompt" history_lines = self.io.get_input_history() - selected_lines = run_fzf(history_lines) + selected_lines = run_fzf(history_lines, coder=self.coder) if selected_lines: self.io.set_placeholder("".join(selected_lines)) diff --git a/aider/io.py b/aider/io.py index 8a67cdd9d2e..80854cfcc6d 100644 --- a/aider/io.py +++ b/aider/io.py @@ -1399,7 +1399,7 @@ def tool_error(self, message="", strip=True): def tool_warning(self, message="", strip=True): self._tool_message(message, strip, self.tool_warning_color) - def tool_output(self, *messages, log_only=False, bold=False): + def tool_output(self, *messages, log_only=False, bold=False, type=None): if messages: hist = " ".join(messages) hist = f"{hist.strip()}" diff --git a/aider/run_cmd.py b/aider/run_cmd.py index df92503d7e4..6d6111f88f2 100644 --- a/aider/run_cmd.py +++ b/aider/run_cmd.py @@ -8,12 +8,12 @@ import psutil -def run_cmd(command, verbose=False, error_print=None, cwd=None): +def run_cmd(command, verbose=False, error_print=None, cwd=None, should_print=True): try: if sys.stdin.isatty() and hasattr(pexpect, "spawn") and platform.system() != "Windows": - return run_cmd_pexpect(command, verbose, cwd) + return run_cmd_pexpect(command, verbose, cwd, should_print=should_print) - return run_cmd_subprocess(command, verbose, cwd) + return run_cmd_subprocess(command, verbose, cwd, 
should_print=should_print) except OSError as e: error_message = f"Error occurred while running command '{command}': {str(e)}" if error_print is None: @@ -39,7 +39,9 @@ def get_windows_parent_process_name(): return None -def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.encoding): +def run_cmd_subprocess( + command, verbose=False, cwd=None, encoding=sys.stdout.encoding, should_print=True +): if verbose: print("Using run_cmd_subprocess:", command) @@ -68,18 +70,18 @@ def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.enc executable=shell if platform.system() != "Windows" else None, encoding=encoding, errors="replace", - bufsize=0, # Set bufsize to 0 for unbuffered output + bufsize=1, # Set bufsize to 0 for unbuffered output universal_newlines=True, cwd=cwd, ) output = [] - while True: - chunk = process.stdout.read(1) - if not chunk: - break - print(chunk, end="", flush=True) # Print the chunk in real-time - output.append(chunk) # Store the chunk for later use + + for line in process.stdout: + output.append(line) + + if should_print: + print(line, end="", flush=True) process.wait() return process.returncode, "".join(output) @@ -87,7 +89,7 @@ def run_cmd_subprocess(command, verbose=False, cwd=None, encoding=sys.stdout.enc return 1, str(e) -def run_cmd_pexpect(command, verbose=False, cwd=None): +def run_cmd_pexpect(command, verbose=False, cwd=None, should_print=True): """ Run a shell command interactively using pexpect, capturing all output. diff --git a/aider/tools/command.py b/aider/tools/command.py index 9a9e3537b84..bd86d7c2aae 100644 --- a/aider/tools/command.py +++ b/aider/tools/command.py @@ -52,6 +52,12 @@ async def execute(cls, coder, command_string): coder.io.tool_output(f"Skipped execution of shell command: {command_string}") return "Shell command execution skipped by user." 
+ should_print = True + tui = None + if coder.tui and coder.tui(): + tui = coder.tui() + should_print = False + # Proceed with execution if confirmed is True coder.io.tool_output(f"⚙️ Executing non-interactive shell command: {command_string}") @@ -60,6 +66,7 @@ async def execute(cls, coder, command_string): command_string, verbose=coder.verbose, cwd=coder.root, # Execute in the project root + should_print=should_print, ) # Format the output for the result message, include more content @@ -74,6 +81,9 @@ async def execute(cls, coder, command_string): " large_file_token_threshold)" ) + if tui: + coder.io.tool_output(output_content) + if exit_status == 0: return ( f"Shell command executed successfully (exit code 0). Output:\n{output_content}" diff --git a/aider/tools/command_interactive.py b/aider/tools/command_interactive.py index ffff8f8156c..d447e0b9536 100644 --- a/aider/tools/command_interactive.py +++ b/aider/tools/command_interactive.py @@ -51,6 +51,12 @@ async def execute(cls, coder, command_string): coder.io.tool_output(f"Skipped execution of shell command: {command_string}") return "Shell command execution skipped by user." 
+ should_print = True + # tui = None + if coder.tui and coder.tui(): + # tui = coder.tui() + should_print = False + coder.io.tool_output(f"⚙️ Starting interactive shell command: {command_string}") coder.io.tool_output(">>> You may need to interact with the command below <<<") coder.io.tool_output(" \n") @@ -64,6 +70,7 @@ async def execute(cls, coder, command_string): verbose=coder.verbose, # Pass verbose flag error_print=coder.io.tool_error, # Use io for error printing cwd=coder.root, # Execute in the project root + should_print=should_print, ) await asyncio.sleep(1) diff --git a/aider/tools/utils/output.py b/aider/tools/utils/output.py index b038de924db..4c8f84dce4f 100644 --- a/aider/tools/utils/output.py +++ b/aider/tools/utils/output.py @@ -30,7 +30,8 @@ def tool_header(coder, mcp_server, tool_response): color_start, color_end = color_markers(coder) coder.io.tool_output( - f"{color_start}Tool Call:{color_end} {mcp_server.name} • {tool_response.function.name}" + f"{color_start}Tool Call:{color_end} {mcp_server.name} • {tool_response.function.name}", + type="Tool Call", ) @@ -102,7 +103,7 @@ def tool_footer(coder, tool_response): coder.io.tool_output(f"Tool ID: {tool_response.id}") coder.io.tool_output(f"Tool type: {tool_response.type}") - coder.io.tool_output("\n") + coder.io.tool_output("\n", type="tool-footer") def color_markers(coder): diff --git a/aider/tui/__init__.py b/aider/tui/__init__.py index 05b56a30c04..f3107c3be42 100644 --- a/aider/tui/__init__.py +++ b/aider/tui/__init__.py @@ -5,6 +5,7 @@ """ import queue +import weakref from .app import TUI from .io import TextualInputOutput @@ -74,6 +75,9 @@ async def launch_tui(coder, output_queue, input_queue): worker = CoderWorker(coder, output_queue, input_queue) app = TUI(worker, output_queue, input_queue) + # Set weak reference to TUI app on the coder instance + coder.tui = weakref.ref(app) + return_code = await app.run_async() worker.stop() diff --git a/aider/tui/app.py b/aider/tui/app.py index 
6e5ff1db277..d04dc3f1071 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -1,5 +1,6 @@ """Main Textual application for Aider TUI.""" +import concurrent.futures import queue from textual.app import App, ComposeResult @@ -297,6 +298,21 @@ def _do_quit(self): self.worker.stop() self.exit() + def run_obstructive(self, func, *args, **kwargs): + """Run a function with the TUI suspended, called from a worker thread.""" + future = concurrent.futures.Future() + + def wrapper(): + try: + with self.suspend(): + result = func(*args, **kwargs) + future.set_result(result) + except Exception as e: + future.set_exception(e) + + self.call_from_thread(wrapper) + return future.result() + def on_cost_update(self, message: CostUpdate): """Handle cost update from output.""" footer = self.query_one(AiderFooter) diff --git a/aider/tui/io.py b/aider/tui/io.py index 4579ccd203d..5d0dcd33d1e 100644 --- a/aider/tui/io.py +++ b/aider/tui/io.py @@ -163,9 +163,15 @@ def tool_output(self, *messages, **kwargs): """ if messages: text = " ".join(str(m) for m in messages) + type = kwargs.get("type", None) # Check if this should start a new task should_start, title, task_type = self._detect_task_start(text) + + if type: + should_start = True + title = type + if should_start: self.start_task(title, task_type) @@ -191,6 +197,14 @@ def start_spinner(self, text, update_last_text=True): } ) + self.output_queue.put( + { + "type": "spinner", + "action": "update_suffix", + "text": "", + } + ) + def update_spinner(self, text): """Override update_spinner to send updates to TUI. 
diff --git a/aider/tui/widgets/output.py b/aider/tui/widgets/output.py index 730a838e379..8923a5da546 100644 --- a/aider/tui/widgets/output.py +++ b/aider/tui/widgets/output.py @@ -4,6 +4,7 @@ from rich.padding import Padding from rich.style import Style as RichStyle +from rich.text import Text from textual import events, on from textual.message import Message from textual.widgets import RichLog @@ -33,6 +34,7 @@ class OutputContainer(RichLog): """ _last_write_type = None + _write_history = [] def __init__(self, **kwargs): super().__init__(**kwargs) @@ -66,7 +68,7 @@ async def stream_chunk(self, text: str): # self.write(Padding(line.strip(), (0, 0, 0, 1))) if line.rstrip(): self.set_last_write_type("assistant") - self.write(line.rstrip()) + self.output(line.rstrip()) async def end_response(self): """End the current LLM response.""" @@ -75,8 +77,8 @@ async def end_response(self): async def _stop_stream(self): """Stop the current markdown stream.""" # Flush any remaining buffer content - if self._line_buffer.strip(): - self.write(self._line_buffer) + if self._line_buffer.rstrip(): + self.output(self.rstrip()) self._line_buffer = "" def add_user_message(self, text: str): @@ -84,7 +86,7 @@ def add_user_message(self, text: str): # User messages shown with > prefix in green color self.auto_scroll = True self.set_last_write_type("user") - self.write(f"[bold medium_spring_green]> {text}[/bold medium_spring_green]") + self.output(f"[bold medium_spring_green]> {text}[/bold medium_spring_green]") self.scroll_end(animate=False) def add_system_message(self, text: str, dim=True): @@ -103,7 +105,7 @@ def add_system_message(self, text: str, dim=True): text = Padding(f"{start}{text}{end}", (0, 0, 0, 2)) self.set_last_write_type("system") - self.write(text) + self.output(text) def add_output(self, text: str, task_id: str = None, dim=True): """Add output text as a system message. 
@@ -125,8 +127,12 @@ def add_output_styled(self, text: str, styles=None): if not styles: styles = dict() - styles = RichStyle(**styles) - self.write(Padding(styles.render(text=text), (0, 0, 0, 2))) + style = RichStyle(**styles) + with self.app.console.capture() as capture: + self.app.console.print(Text(text), style=style) + capture_text = capture.get() + + self.output(Padding(capture_text, (0, 0, 0, 2))) def _check_cost(self, text: str): """Extract and emit cost updates.""" @@ -139,7 +145,7 @@ def _check_cost(self, text: str): def start_task(self, task_id: str, title: str, task_type: str = "general"): """Start a new task section.""" - self.write(f"\n[bold]{title}[/bold]") + self.set_last_write_type(f"{task_id}-{title}-{task_type}") def clear_output(self): """Clear all output.""" @@ -147,11 +153,47 @@ def clear_output(self): self.clear() def set_last_write_type(self, type): - if self._last_write_type and self._last_write_type != type: - self.write("") + if type and self._last_write_type and self._last_write_type != type: + self.output("") self._last_write_type = type + def output(self, text, check_duplicates=True): + """Write output with duplicate newline checking. 
+ + Args: + text: The text to write + check_duplicates: If True, check for duplicate newlines before writing + """ + with self.app.console.capture() as capture: + self.app.console.print(text) + check = Text(capture.get()).plain + + # self.write(str(self._write_history)) + # self.write(repr(check)) + + # Check for duplicate newlines + + if check_duplicates and len(self._write_history) >= 2: + nl_check = check in ["", "\n", "\\n"] + nl_last = self._write_history[-1] in ["", "\n", "\\n"] + nl_penultimate = self._write_history[-2] in ["", "\n", "\\n"] or self._write_history[ + -2 + ].endswith("\n") + + if nl_check and nl_last and nl_penultimate: + return + + # Call the actual write method + self.write(text) + + # Log the write + self._write_history.append(check) + + # Keep history size manageable + if len(self._write_history) > 5: + self._write_history.pop(0) + @on(events.Print) def log_print(self, event: events.Print) -> None: """Writes the captured print output to the RichLog widget.""" @@ -165,7 +207,7 @@ def log_print(self, event: events.Print) -> None: write_type = "stderr" self.set_last_write_type(write_type) - self.add_output_styled(event.text, {"color": color}) + self.add_output_styled(event.text.removesuffix("\n"), {"color": color}) # Prevent the event from bubbling further event.prevent_default() diff --git a/aider/tui/worker.py b/aider/tui/worker.py index 4d0e9cacda0..4678a103e8b 100644 --- a/aider/tui/worker.py +++ b/aider/tui/worker.py @@ -102,17 +102,18 @@ async def _async_run(self): kwargs["args"] = self.coder.args # Skip summarization to avoid blocking LLM calls during mode switch kwargs["summarize_from_coder"] = False + kwargs["mcp_servers"] = [] # Empty to skip initialization + new_coder = await Coder.create(**kwargs) + new_coder.args = self.coder.args # Transfer MCP state to avoid re-initialization - old_mcp_servers = self.coder.mcp_servers - old_mcp_tools = self.coder.mcp_tools - kwargs["mcp_servers"] = [] # Empty to skip initialization - self.coder 
= await Coder.create(**kwargs) - # Restore MCP state - self.coder.mcp_servers = old_mcp_servers - self.coder.mcp_tools = old_mcp_tools + new_coder.mcp_servers = self.coder.mcp_servers + new_coder.mcp_tools = self.coder.mcp_tools + # Transfer TUI app weak reference + new_coder.tui = self.coder.tui # Notify TUI of mode change + self.coder = new_coder edit_format = getattr(self.coder, "edit_format", "code") or "code" self.output_queue.put( { diff --git a/aider/utils.py b/aider/utils.py index 50b0e023fdd..a171ca8466d 100644 --- a/aider/utils.py +++ b/aider/utils.py @@ -14,7 +14,7 @@ IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp", ".pdf"} -def run_fzf(input_data, multi=False): +def _execute_fzf(input_data, multi=False): """ Runs fzf as a subprocess, feeding it input_data. Returns the selected items. @@ -46,6 +46,29 @@ def run_fzf(input_data, multi=False): return [] +def run_fzf(input_data, multi=False, coder=None): + """ + Runs fzf as a subprocess, feeding it input_data. + Returns the selected items. 
+ """ + if not shutil.which("fzf"): + return [] # fzf not available + + tui = None + if coder is not None and coder.tui: + tui = coder.tui() + + result = [] + + if tui: + result = tui.run_obstructive(_execute_fzf, input_data, multi=multi) + + else: + result = _execute_fzf(input_data, multi=multi) + + return result + + class IgnorantTemporaryDirectory: def __init__(self): if sys.version_info >= (3, 10): From 58c208647882f7a652bf1ec326b44c80f068c0de Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 18:48:09 -0500 Subject: [PATCH 14/28] TUI Updates: Stop generation task wIth escape --- aider/tui/app.py | 12 ++++++++++++ aider/tui/worker.py | 10 ++++++++++ 2 files changed, 22 insertions(+) diff --git a/aider/tui/app.py b/aider/tui/app.py index d04dc3f1071..3899ec336b3 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -19,6 +19,7 @@ class TUI(App): BINDINGS = [ # Binding("ctrl+c", "quit", "Quit", show=True), Binding("ctrl+l", "clear_output", "Clear", show=True), + Binding("escape", "interrupt", "Interrupt", show=True), ] def __init__(self, coder_worker, output_queue, input_queue): @@ -279,6 +280,17 @@ def action_clear_output(self): output_container = self.query_one("#output", OutputContainer) output_container.clear_output() + def action_interrupt(self): + """Interrupt the current task.""" + if self.worker: + self.worker.interrupt() + # Notify user + try: + status_bar = self.query_one("#status-bar", StatusBar) + status_bar.show_notification("Interrupting...", severity="warning", timeout=3) + except Exception: + pass + def action_quit(self): """Quit the application.""" # Prevent multiple quit attempts diff --git a/aider/tui/worker.py b/aider/tui/worker.py index 4678a103e8b..8effbab3467 100644 --- a/aider/tui/worker.py +++ b/aider/tui/worker.py @@ -131,6 +131,16 @@ async def _async_run(self): self.output_queue.put({"type": "error", "message": str(e)}) break + def interrupt(self): + """Cancel the current output task on the coder instance.""" + if 
self.coder and hasattr(self.coder, "io") and self.coder.io: + # Cancel the output task if it exists + if hasattr(self.coder.io, "output_task") and self.coder.io.output_task: + self.coder.io.output_task.cancel() + # Also set output_running to False to stop the output_task loop + if hasattr(self.coder, "output_running"): + self.coder.output_running = False + def stop(self): """Stop the worker thread gracefully.""" self.running = False From 871b556de0662370600792a739fd064d9fd9d208 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 19:43:02 -0500 Subject: [PATCH 15/28] TUI Updates: - Add key hints for submission and stopping - Move model name to left by default for balance --- aider/tui/app.py | 27 ++++++++++++++++++++++++++- aider/tui/styles.tcss | 14 ++++++++++++-- aider/tui/widgets/__init__.py | 2 ++ aider/tui/widgets/footer.py | 25 ++++++++++++++++--------- aider/tui/widgets/input_area.py | 2 +- aider/tui/widgets/key_hints.py | 16 ++++++++++++++++ 6 files changed, 73 insertions(+), 13 deletions(-) create mode 100644 aider/tui/widgets/key_hints.py diff --git a/aider/tui/app.py b/aider/tui/app.py index 3899ec336b3..098a4b12c1a 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -7,7 +7,14 @@ from textual.binding import Binding from textual.theme import Theme -from .widgets import AiderFooter, CompletionBar, InputArea, OutputContainer, StatusBar +from .widgets import ( + AiderFooter, + CompletionBar, + InputArea, + KeyHints, + OutputContainer, + StatusBar, +) from .widgets.output import CostUpdate @@ -80,6 +87,7 @@ def compose(self) -> ComposeResult: yield OutputContainer(id="output") yield StatusBar(id="status-bar") yield InputArea(history_file=history_file, id="input") + yield KeyHints(id="key-hints") yield AiderFooter( model_name=model_name, project_name=project_name, @@ -110,9 +118,23 @@ def on_mount(self): self.worker.start() self.query_one("#input").focus() + # Initialize key hints + self.update_key_hints() + # Load git info in 
background to avoid blocking startup self.call_later(self._load_git_info) + def update_key_hints(self, generating=False): + """Update the key hints below the input area.""" + try: + hints = self.query_one(KeyHints) + if generating: + hints.update("escape to cancel") + else: + hints.update("ctrl+s to submit") + except Exception: + pass + def _load_git_info(self): """Load git branch and dirty count (deferred to avoid blocking startup).""" footer = self.query_one(AiderFooter) @@ -241,6 +263,7 @@ def update_spinner(self, msg): def enable_input(self, msg): """Enable input and update autocomplete data.""" + self.update_key_hints(generating=False) input_area = self.query_one("#input", InputArea) input_area.disabled = False # Ensure input is enabled files = msg.get("files", []) @@ -273,6 +296,8 @@ def on_input_area_submit(self, message: InputArea.Submit): footer = self.query_one(AiderFooter) footer.start_spinner("Thinking...") + self.update_key_hints(generating=True) + self.input_queue.put({"text": user_input}) def action_clear_output(self): diff --git a/aider/tui/styles.tcss b/aider/tui/styles.tcss index a276d55a294..334aaadb36d 100644 --- a/aider/tui/styles.tcss +++ b/aider/tui/styles.tcss @@ -41,7 +41,7 @@ Screen { background: $surface; border: round $accent 50%; padding: 0 0 0 2; - margin: 0 1 2 1; + margin: 0 1 0 1; scrollbar-size: 1 1; } @@ -65,6 +65,16 @@ Input > .input--placeholder, TextArea > .text-area--placeholder { background: $surface; } + +/* Key hints below input */ +#key-hints { + height: 1; + width: 100%; + text-align: right; + color: $secondary; + padding: 0 2 0 0; + margin: 0 0 1 0; +} /* Footer - same background as everything else */ #footer { height: 1; @@ -72,4 +82,4 @@ Input > .input--placeholder, TextArea > .text-area--placeholder { background: $surface; color: $accent; padding: 0 1; -} +} \ No newline at end of file diff --git a/aider/tui/widgets/__init__.py b/aider/tui/widgets/__init__.py index dec40f7fe26..e5c287433bb 100644 --- 
a/aider/tui/widgets/__init__.py +++ b/aider/tui/widgets/__init__.py @@ -3,6 +3,7 @@ from .completion_bar import CompletionBar from .footer import AiderFooter from .input_area import InputArea +from .key_hints import KeyHints from .output import OutputContainer from .status_bar import StatusBar @@ -10,6 +11,7 @@ "AiderFooter", "CompletionBar", "InputArea", + "KeyHints", "OutputContainer", "StatusBar", ] diff --git a/aider/tui/widgets/footer.py b/aider/tui/widgets/footer.py index 632956973bf..1e9ff5d637e 100644 --- a/aider/tui/widgets/footer.py +++ b/aider/tui/widgets/footer.py @@ -63,11 +63,14 @@ def _get_display_model(self) -> str: if not self.model_name: return "" # Strip common prefixes like "openrouter/x-ai/" - name = self.model_name - if "/" in name: - name = name.split("/")[-1] - if len(name) > 25: - name = name[:22] + "..." + name = self.app.worker.coder.main_model.name + if len(name) > 40: + if "/" in name: + name = name.split("/")[-1] + + if len(name) > 35: + name = name[:35] + "..." 
+ return name def render(self) -> Text: @@ -85,6 +88,10 @@ def render(self) -> Text: if self.spinner_suffix: left.append(" • ") left.append(self.spinner_suffix) + else: + left.append("Model") + left.append(" • ") + left.append(self._get_display_model()) # Build right side: mode + model + project + git right = Text() @@ -93,10 +100,10 @@ def render(self) -> Text: right.append(f"{self.aider_mode}") right.append(" • ") - model_display = self._get_display_model() - if model_display: - right.append(f"{model_display}") - right.append(" • ") + # model_display = self._get_display_model() + # if model_display: + # right.append(f"{model_display}") + # right.append(" • ") if self.project_name: right.append(f"{self.project_name}") diff --git a/aider/tui/widgets/input_area.py b/aider/tui/widgets/input_area.py index 417e30295fb..6d489a9a7bf 100644 --- a/aider/tui/widgets/input_area.py +++ b/aider/tui/widgets/input_area.py @@ -54,7 +54,7 @@ def __init__(self, history_file: str = None, **kwargs): # Let's assume kwargs might handle it or we set it. # Actually, let's just set the default if it's empty. if not self.placeholder: - self.placeholder = "> Type your message... (Ctrl+s to send, Enter for new line)" + self.placeholder = "> Type your message... 
(ctrl+s to send, enter for new line)" self.files = [] self.commands = [] diff --git a/aider/tui/widgets/key_hints.py b/aider/tui/widgets/key_hints.py new file mode 100644 index 00000000000..89674ee3df0 --- /dev/null +++ b/aider/tui/widgets/key_hints.py @@ -0,0 +1,16 @@ +from textual.widgets import Static + + +class KeyHints(Static): + """Key hints widget.""" + + DEFAULT_CSS = """ + KeyHints { + text-align: right; + color: $secondary; + padding: 0 2 0 0; + height: 1; + width: 100%; + margin: 0 0 1 0; + } + """ From 6cdb5b02586efe8ccf8f783d007339cd1eceb009 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 19:51:14 -0500 Subject: [PATCH 16/28] TUI Updates: Always auto complete last contiguous block --- aider/tui/app.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/aider/tui/app.py b/aider/tui/app.py index 098a4b12c1a..8facf245b53 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -489,10 +489,12 @@ def _get_suggestions(self, text: str) -> list[str]: suggestions = all_commands else: suggestions = [c for c in all_commands if c.startswith(cmd_part)] - else: + elif len(parts) > 1: # Complete command argument cmd_name = cmd_part - arg_prefix = parts[1] if len(parts) > 1 else "" + end_lookup = text.rsplit(maxsplit=1) + + arg_prefix = end_lookup[1] arg_prefix_lower = arg_prefix.lower() # Check if this command needs path-based completion From 42e3b06625ceef8813ddca26a7f18f3525322704 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 19:57:23 -0500 Subject: [PATCH 17/28] TUI Updates: Reduce aliases to 2 since I still like "ce.cli" --- aider/tui/widgets/footer.py | 2 +- pyproject.toml | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/aider/tui/widgets/footer.py b/aider/tui/widgets/footer.py index 1e9ff5d637e..6f727cd178a 100644 --- a/aider/tui/widgets/footer.py +++ b/aider/tui/widgets/footer.py @@ -89,7 +89,7 @@ def render(self) -> Text: left.append(" • ") 
left.append(self.spinner_suffix) else: - left.append("Model") + left.append("cecli") left.append(" • ") left.append(self._get_display_model()) diff --git a/pyproject.toml b/pyproject.toml index e305050eb84..d43fbe87e2d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,9 +23,7 @@ Homepage = "https://github.com/dwash96/aider-ce" [project.scripts] aider-ce = "aider.main:main" -"ce" = "aider.main:main" "cecli" = "aider.main:main" -"ce-cli" = "aider.main:main" "ce.cli" = "aider.main:main" [tool.setuptools.dynamic] From b25450b0214799b3b556970ed59a6b47ffa80677 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 21:32:41 -0500 Subject: [PATCH 18/28] TUI Updates: Add active file list inside common input container --- aider/commands.py | 16 +++--- aider/tui/app.py | 33 ++++++++----- aider/tui/io.py | 18 ++++++- aider/tui/styles.tcss | 39 ++++++++++++--- aider/tui/widgets/__init__.py | 2 + aider/tui/widgets/completion_bar.py | 4 +- aider/tui/widgets/file_list.py | 76 +++++++++++++++++++++++++++++ 7 files changed, 160 insertions(+), 28 deletions(-) create mode 100644 aider/tui/widgets/file_list.py diff --git a/aider/commands.py b/aider/commands.py index bb4bea285cc..338ea3cfdb8 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -1428,7 +1428,7 @@ def cmd_ls(self, args): files = self.coder.get_all_relative_files() - other_files = [] + # other_files = [] chat_files = [] read_only_files = [] read_only_stub_files = [] @@ -1436,8 +1436,8 @@ def cmd_ls(self, args): abs_file_path = self.coder.abs_root_path(file) if abs_file_path in self.coder.abs_fnames: chat_files.append(file) - else: - other_files.append(file) + # else: + # other_files.append(file) # Add read-only files for abs_file_path in self.coder.abs_read_only_fnames: @@ -1449,14 +1449,14 @@ def cmd_ls(self, args): rel_file_path = self.coder.get_rel_fname(abs_file_path) read_only_stub_files.append(rel_file_path) - if not chat_files and not other_files and not read_only_files and not 
read_only_stub_files: + if not chat_files and not read_only_files and not read_only_stub_files: self.io.tool_output("\nNo files in chat, git repo, or read-only list.") return - if other_files: - self.io.tool_output("Repo files not in the chat:\n") - for file in other_files: - self.io.tool_output(f" {file}") + # if other_files: + # self.io.tool_output("Repo files not in the chat:\n") + # for file in other_files: + # self.io.tool_output(f" {file}") # Read-only files: if read_only_files or read_only_stub_files: diff --git a/aider/tui/app.py b/aider/tui/app.py index 8facf245b53..29403b248fe 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -5,11 +5,13 @@ from textual.app import App, ComposeResult from textual.binding import Binding +from textual.containers import Vertical from textual.theme import Theme from .widgets import ( AiderFooter, CompletionBar, + FileList, InputArea, KeyHints, OutputContainer, @@ -86,7 +88,11 @@ def compose(self) -> ComposeResult: # Git info loaded in on_mount to avoid blocking startup yield OutputContainer(id="output") yield StatusBar(id="status-bar") - yield InputArea(history_file=history_file, id="input") + yield Vertical( + InputArea(history_file=history_file, id="input"), + FileList(id="file-list", classes="empty"), + id="input-container", + ) yield KeyHints(id="key-hints") yield AiderFooter( model_name=model_name, @@ -247,6 +253,21 @@ def show_confirmation(self, msg): explicit_yes_required=options.get("explicit_yes_required", False), ) + def enable_input(self, msg): + """Enable input and update autocomplete data.""" + self.update_key_hints(generating=False) + input_area = self.query_one("#input", InputArea) + input_area.disabled = False # Ensure input is enabled + files = msg.get("files", []) + commands = msg.get("commands", []) + input_area.update_autocomplete_data(files, commands) + + # Update file list + file_list = self.query_one("#file-list", FileList) + file_list.update_files(msg.get("chat_files", {})) + + input_area.focus() 
+ def update_spinner(self, msg): """Update spinner in footer.""" footer = self.query_one(AiderFooter) @@ -261,16 +282,6 @@ def update_spinner(self, msg): elif action == "stop": footer.stop_spinner() - def enable_input(self, msg): - """Enable input and update autocomplete data.""" - self.update_key_hints(generating=False) - input_area = self.query_one("#input", InputArea) - input_area.disabled = False # Ensure input is enabled - files = msg.get("files", []) - commands = msg.get("commands", []) - input_area.update_autocomplete_data(files, commands) - input_area.focus() - def show_error(self, message): """Show error notification.""" status_bar = self.query_one("#status-bar", StatusBar) diff --git a/aider/tui/io.py b/aider/tui/io.py index 5d0dcd33d1e..382453651f2 100644 --- a/aider/tui/io.py +++ b/aider/tui/io.py @@ -5,7 +5,7 @@ from rich.console import Console -from aider.io import InputOutput +from aider.io import InputOutput, get_rel_fname class TextualInputOutput(InputOutput): @@ -281,11 +281,27 @@ async def get_input( # Signal TUI that we're ready for input command_names = commands.get_commands() if commands else [] + # Process read-only files + rel_read_only_fnames = [] + if abs_read_only_fnames: + rel_read_only_fnames = [get_rel_fname(f, root) for f in abs_read_only_fnames] + + rel_read_only_stubs_fnames = [] + if abs_read_only_stubs_fnames: + rel_read_only_stubs_fnames = [ + get_rel_fname(f, root) for f in abs_read_only_stubs_fnames + ] + self.output_queue.put( { "type": "ready_for_input", "files": list(addable_rel_fnames) if addable_rel_fnames else [], "commands": command_names, + "chat_files": { + "rel_fnames": list(rel_fnames), + "rel_read_only_fnames": rel_read_only_fnames, + "rel_read_only_stubs_fnames": rel_read_only_stubs_fnames, + }, } ) diff --git a/aider/tui/styles.tcss b/aider/tui/styles.tcss index 334aaadb36d..82d2399e471 100644 --- a/aider/tui/styles.tcss +++ b/aider/tui/styles.tcss @@ -34,14 +34,24 @@ Screen { } /* Input area - floating card style 
*/ -#input { +#input-container { height: auto; max-height: 33%; - min-height: 5; - background: $surface; - border: round $accent 50%; - padding: 0 0 0 2; + min-height: 0; + padding: 0 0 0 1; margin: 0 1 0 1; + border: round $accent 50%; + background: $surface; +} + +#input { + max-height: 1fr; + height: auto; + min-height: 3; + background: $surface; + border: none; + padding: 0 0 0 0; + margin: 0 2 0 0; scrollbar-size: 1 1; } @@ -65,6 +75,23 @@ Input > .input--placeholder, TextArea > .text-area--placeholder { background: $surface; } +/* File List below input */ +#file-list { + padding: 0 0 0 0; + margin: 0; + background: $surface; + color: $secondary; + height: auto; + min-height: 0; + dock: bottom +} + +#file-list.empty { + height: 0; + margin: 0; + padding: 0; + display: none; +} /* Key hints below input */ #key-hints { @@ -82,4 +109,4 @@ Input > .input--placeholder, TextArea > .text-area--placeholder { background: $surface; color: $accent; padding: 0 1; -} \ No newline at end of file +} diff --git a/aider/tui/widgets/__init__.py b/aider/tui/widgets/__init__.py index e5c287433bb..0750d40727b 100644 --- a/aider/tui/widgets/__init__.py +++ b/aider/tui/widgets/__init__.py @@ -1,6 +1,7 @@ """Widgets for the Aider TUI.""" from .completion_bar import CompletionBar +from .file_list import FileList from .footer import AiderFooter from .input_area import InputArea from .key_hints import KeyHints @@ -14,4 +15,5 @@ "KeyHints", "OutputContainer", "StatusBar", + "FileList", ] diff --git a/aider/tui/widgets/completion_bar.py b/aider/tui/widgets/completion_bar.py index 7bcfc70c73c..d233047b8ca 100644 --- a/aider/tui/widgets/completion_bar.py +++ b/aider/tui/widgets/completion_bar.py @@ -18,8 +18,8 @@ class CompletionBar(Widget, can_focus=False): CompletionBar { height: 1; background: $surface; - margin: 0 1; - padding: 0 1; + margin: 0 0; + padding: 0 0; layout: horizontal; } diff --git a/aider/tui/widgets/file_list.py b/aider/tui/widgets/file_list.py new file mode 100644 index 
00000000000..05eb2f7e96e --- /dev/null +++ b/aider/tui/widgets/file_list.py @@ -0,0 +1,76 @@ +from rich.columns import Columns +from rich.console import Group +from textual.widgets import Static + + +class FileList(Static): + """Widget to display the list of files in chat.""" + + def update_files(self, chat_files): + """Update the file list display.""" + if not chat_files: + self.update("") + return + + rel_fnames = chat_files.get("rel_fnames", []) + rel_read_only_fnames = chat_files.get("rel_read_only_fnames", []) + rel_read_only_stubs_fnames = chat_files.get("rel_read_only_stubs_fnames", []) + + total_files = ( + len(rel_fnames) + + len(rel_read_only_fnames or []) + + len(rel_read_only_stubs_fnames or []) + ) + + if total_files == 0: + self.add_class("empty") + self.update("") + return + else: + self.remove_class("empty") + + # For very large numbers of files, use a summary display + if total_files > 20: + read_only_count = len(rel_read_only_fnames or []) + stub_file_count = len(rel_read_only_stubs_fnames or []) + editable_count = len([f for f in rel_fnames if f not in (rel_read_only_fnames or [])]) + + summary = f"{editable_count} editable file(s)" + if read_only_count > 0: + summary += f", {read_only_count} read-only file(s)" + if stub_file_count > 0: + summary += f", {stub_file_count} stub file(s)" + summary += " (use /ls to list all files)" + self.update(summary) + return + + renderables = [] + + # Handle read-only files + if rel_read_only_fnames or rel_read_only_stubs_fnames: + ro_paths = [] + # Regular read-only files + for rel_path in sorted(rel_read_only_fnames or []): + ro_paths.append(rel_path) + # Stub files with (stub) marker + for rel_path in sorted(rel_read_only_stubs_fnames or []): + ro_paths.append(f"{rel_path} (stub)") + + if ro_paths: + files_with_label = ["Readonly:"] + ro_paths + renderables.append(Columns(files_with_label)) + + # Handle editable files + editable_files = [ + f + for f in sorted(rel_fnames) + if f not in rel_read_only_fnames 
and f not in rel_read_only_stubs_fnames + ] + if editable_files: + files_with_label = editable_files + if rel_read_only_fnames or rel_read_only_stubs_fnames: + files_with_label = ["Editable:"] + editable_files + + renderables.append(Columns(files_with_label)) + + self.update(Group(*renderables)) From 9d2974d8340f01e21d47bc6245564242643ac08f Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 22:06:52 -0500 Subject: [PATCH 19/28] TUI Updates: - Grep Tool should respect tui output scheme for command run - run_cmd should poll for sub command output so it can actually finish - linter tests should account for run_cmd changes --- aider/run_cmd.py | 16 ++++++++++++---- aider/tools/grep.py | 22 ++++++++++++++++++---- tests/basic/test_linter.py | 15 ++++++++++++--- 3 files changed, 42 insertions(+), 11 deletions(-) diff --git a/aider/run_cmd.py b/aider/run_cmd.py index 6d6111f88f2..2de892f51a6 100644 --- a/aider/run_cmd.py +++ b/aider/run_cmd.py @@ -77,11 +77,19 @@ def run_cmd_subprocess( output = [] - for line in process.stdout: - output.append(line) + while True: + # Read one line (it will block until a newline or EOF is received) + line = process.stdout.readline() + + # Check if the line is empty AND the process has finished + if not line and process.poll() is not None: + break # Exit the loop if nothing more to read and process is done + + if line: + output.append(line) - if should_print: - print(line, end="", flush=True) + if should_print: + print(line, end="", flush=True) process.wait() return process.returncode, "".join(output) diff --git a/aider/tools/grep.py b/aider/tools/grep.py index 88eec568ac9..93102947faa 100644 --- a/aider/tools/grep.py +++ b/aider/tools/grep.py @@ -172,18 +172,28 @@ def execute( # Convert list to command string for run_cmd_subprocess command_string = oslex.join(cmd_args) + should_print = True + tui = None + if coder.tui and coder.tui(): + tui = coder.tui() + should_print = False + coder.io.tool_output(f"⚙️ Executing 
{tool_name}: {command_string}") # Use run_cmd_subprocess for execution # Note: rg, ag, and grep return 1 if no matches are found, which is not an error for this tool. exit_status, combined_output = run_cmd_subprocess( - command_string, verbose=coder.verbose, cwd=coder.root # Execute in the project root + command_string, + verbose=coder.verbose, + cwd=coder.root, + should_print=should_print, # Execute in the project root ) # Format the output for the result message output_content = combined_output or "" # Handle exit codes (consistent across rg, ag, grep) + result_message = "" if exit_status == 0: # Limit output size if necessary max_output_lines = 50 # Consider making this configurable @@ -200,11 +210,10 @@ def execute( result_message = "No matches found (unexpected)." else: result_message = f"Found matches:\n```text\n{output_content}\n```" - return result_message elif exit_status == 1: # Exit code 1 means no matches found - this is expected behavior, not an error. - return "No matches found." + result_message = "No matches found." 
else: # Exit code > 1 indicates an actual error error_message = ( @@ -219,7 +228,12 @@ def execute( ) error_message += f" Output:\n{output_content}" coder.io.tool_error(error_message) - return f"Error: {error_message}" + result_message = f"Error: {error_message}" + + if tui: + coder.io.tool_output(result_message) + + return result_message except Exception as e: # Add command_string to the error message if it's defined diff --git a/tests/basic/test_linter.py b/tests/basic/test_linter.py index 46b02a36774..c9dab58e2a1 100644 --- a/tests/basic/test_linter.py +++ b/tests/basic/test_linter.py @@ -31,7 +31,10 @@ def test_get_rel_fname(self): def test_run_cmd(self, mock_popen): mock_process = MagicMock() mock_process.returncode = 0 - mock_process.stdout.read.side_effect = ("", None) + # First readline returns empty string, second returns None + mock_process.stdout.readline.side_effect = ["", None] + # First poll returns None (process still running), second returns 0 (exit code) + mock_process.poll.side_effect = [None, 0] mock_popen.return_value = mock_process result = self.linter.run_cmd("test_cmd", "test_file.py", "code") @@ -51,7 +54,10 @@ def test_run_cmd_win(self): def test_run_cmd_with_errors(self, mock_popen): mock_process = MagicMock() mock_process.returncode = 1 - mock_process.stdout.read.side_effect = ("Error message", None) + # First readline returns error, second returns empty string, third returns None + mock_process.stdout.readline.side_effect = ["Error message", "", None] + # First poll returns None (process still running), second returns 1 (exit code) + mock_process.poll.side_effect = [None, 1] mock_popen.return_value = mock_process result = self.linter.run_cmd("test_cmd", "test_file.py", "code") @@ -62,7 +68,10 @@ def test_run_cmd_with_special_chars(self): with patch("subprocess.Popen") as mock_popen: mock_process = MagicMock() mock_process.returncode = 1 - mock_process.stdout.read.side_effect = ("Error message", None) + # First readline returns error, 
second returns empty string, third returns None + mock_process.stdout.readline.side_effect = ["Error message", "", None] + # First poll returns None (process still running), second returns 1 (exit code) + mock_process.poll.side_effect = [None, 1] mock_popen.return_value = mock_process # Test with a file path containing special characters From fb9e34355c1c49a17549a1108f353915d6c9b98e Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 22:13:54 -0500 Subject: [PATCH 20/28] TUI Updates: Make clear commands behave intuitively --- aider/commands.py | 8 ++++++++ aider/tui/app.py | 2 ++ 2 files changed, 10 insertions(+) diff --git a/aider/commands.py b/aider/commands.py index 338ea3cfdb8..2b6e3f16152 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -465,6 +465,10 @@ def cmd_clear(self, args): "Clear the chat history" self._clear_chat_history() + + if self.coder.tui and self.coder.tui(): + self.coder.tui().action_clear_output() + self.io.tool_output("All chat history cleared.") def _drop_all_files(self): @@ -494,6 +498,10 @@ def cmd_reset(self, args): "Drop all files and clear the chat history" self._drop_all_files() self._clear_chat_history() + + if self.coder.tui and self.coder.tui(): + self.coder.tui().action_clear_output() + self.io.tool_output("All files dropped and chat history cleared.") def cmd_tokens(self, args): diff --git a/aider/tui/app.py b/aider/tui/app.py index 29403b248fe..6fb07aa2def 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -315,6 +315,8 @@ def action_clear_output(self): """Clear all output.""" output_container = self.query_one("#output", OutputContainer) output_container.clear_output() + output_container.add_output(self.BANNER, dim=False) + self.worker.coder.show_announcements() def action_interrupt(self): """Interrupt the current task.""" From 568eac36b05b26a53305799478a5b0dd672c5a00 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 22:58:58 -0500 Subject: [PATCH 21/28] Add configuration 
and documentation --- README.md | 5 +- aider/args.py | 22 ++++-- aider/main.py | 5 +- aider/tui/__init__.py | 4 +- aider/tui/app.py | 100 ++++++++++++++++++++---- aider/website/docs/config/agent-mode.md | 63 +++++++++------ aider/website/docs/config/tui.md | 87 +++++++++++++++++++++ 7 files changed, 237 insertions(+), 49 deletions(-) create mode 100644 aider/website/docs/config/tui.md diff --git a/README.md b/README.md index e3de8adcc6d..e79afb420ea 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ LLMs are a part of our lives from here on out so join us in learning about and c * [Agent Mode](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/agent-mode.md) * [MCP Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/mcp.md) +* [TUI Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/tui.md) * [Session Management](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/sessions.md) * [Skills](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/skills.md) * [Aider Original Documentation (still mostly applies)](https://aider.chat/) @@ -138,9 +139,9 @@ The current priorities are to improve core capabilities and user experience of t * [ ] Add support for partial files and code snippets in model completion messages 5. 
**TUI Experience** - [Discussion](https://github.com/dwash96/aider-ce/issues/48) - * [ ] Add a full TUI (probably using textual) to have a visual interface competitive with the other coding agent terminal programs + * [x] Add a full TUI (probably using textual) to have a visual interface competitive with the other coding agent terminal programs * [x] Re-integrate pretty output formatting - * [ ] Implement a response area, a prompt area with current auto completion capabilities, and a helper area for management utility commands + * [x] Implement a response area, a prompt area with current auto completion capabilities, and a helper area for managing utility commands 6. **Agent Mode** - [Discussion](https://github.com/dwash96/aider-ce/issues/111) * [x] Renaming "navigator mode" to "agent mode" for simplicity diff --git a/aider/args.py b/aider/args.py index b1d716a4bb2..0f0957be1d2 100644 --- a/aider/args.py +++ b/aider/args.py @@ -218,8 +218,22 @@ def get_parser(default_config_files, git_root): ), ) + ######## + group = parser.add_argument_group("TUI Settings") + group.add_argument( + "--tui", + action="store_true", + default=False, + help="Launch Textual TUI interface (experimental)", + ) + group.add_argument( + "--tui-config", + metavar="TUI_CONFIG_JSON", + help="Specify TUI Mode configuration as a JSON string", + default=None, + ) ######### - group = parser.add_argument_group("Agent settings") + group = parser.add_argument_group("Agent Settings") group.add_argument( "--agent-config", metavar="AGENT_CONFIG_JSON", @@ -613,12 +627,6 @@ def get_parser(default_config_files, git_root): default=False, help="Perform a dry run without modifying files (default: False)", ) - group.add_argument( - "--tui", - action="store_true", - default=False, - help="Launch Textual TUI interface (experimental)", - ) group.add_argument( "--skip-sanity-check-repo", action="store_true", diff --git a/aider/main.py b/aider/main.py index daee334f84d..20eea8f9dac 100644 --- a/aider/main.py +++ 
b/aider/main.py @@ -596,6 +596,9 @@ async def main_async(argv=None, input=None, output=None, force_git_root=None, re if hasattr(args, "agent_config") and args.agent_config is not None: args.agent_config = convert_yaml_to_json_string(args.agent_config) + if hasattr(args, "tui_config") and args.tui_config is not None: + args.tui_config = convert_yaml_to_json_string(args.tui_config) + if hasattr(args, "mcp_servers") and args.mcp_servers is not None: args.mcp_servers = convert_yaml_to_json_string(args.mcp_servers) @@ -1264,7 +1267,7 @@ def get_io(pretty): if args.tui: from aider.tui import launch_tui - return_code = await launch_tui(coder, output_queue, input_queue) + return_code = await launch_tui(coder, output_queue, input_queue, args) return await graceful_exit(coder, return_code) # Standard CLI mode - main loop diff --git a/aider/tui/__init__.py b/aider/tui/__init__.py index f3107c3be42..e63c5878017 100644 --- a/aider/tui/__init__.py +++ b/aider/tui/__init__.py @@ -61,7 +61,7 @@ def create_tui_io(args, editing_mode): return io, output_queue, input_queue -async def launch_tui(coder, output_queue, input_queue): +async def launch_tui(coder, output_queue, input_queue, args): """Launch the TUI application. 
Args: @@ -73,7 +73,7 @@ async def launch_tui(coder, output_queue, input_queue): Exit code from TUI """ worker = CoderWorker(coder, output_queue, input_queue) - app = TUI(worker, output_queue, input_queue) + app = TUI(worker, output_queue, input_queue, args) # Set weak reference to TUI app on the coder instance coder.tui = weakref.ref(app) diff --git a/aider/tui/app.py b/aider/tui/app.py index 6fb07aa2def..489f5687069 100644 --- a/aider/tui/app.py +++ b/aider/tui/app.py @@ -1,6 +1,7 @@ """Main Textual application for Aider TUI.""" import concurrent.futures +import json import queue from textual.app import App, ComposeResult @@ -31,39 +32,108 @@ class TUI(App): Binding("escape", "interrupt", "Interrupt", show=True), ] - def __init__(self, coder_worker, output_queue, input_queue): + def __init__(self, coder_worker, output_queue, input_queue, args): """Initialize the Aider TUI app.""" super().__init__() self.worker = coder_worker self.output_queue = output_queue self.input_queue = input_queue + self.args = args # Store args for _get_config # Cache for code symbols (functions, classes, variables) self._symbols_cache = None self._symbols_files_hash = None - # Register and set aider theme + self.tui_config = self._get_config() + + # Register and set aider theme using config colors + colors = self.tui_config.get("colors", {}) + other = self.tui_config.get("other", {}) BASE_THEME = Theme( name="aider", - primary="#00ff5f", - secondary="#888888", - accent="#00ff87", # Cecli green - foreground="#ffffff", - background="#1e1e1e", - success="#00aa00", - warning="#ffd700", - error="#ff3333", - surface="transparent", # Slightly lighter than background - panel="transparent", - dark=True, + primary=colors.get("primary", "#00ff5f"), + secondary=colors.get("secondary", "#888888"), + accent=colors.get("accent", "#00ff87"), # Cecli green + foreground=colors.get("foreground", "#ffffff"), + background=colors.get("background", "#1e1e1e"), + success=colors.get("success", "#00aa00"), + 
warning=colors.get("warning", "#ffd700"), + error=colors.get("error", "#ff3333"), + surface=colors.get("surface", "transparent"), # Slightly lighter than background + panel=colors.get("panel", "transparent"), + dark=other.get("dark", True), variables={ - "input-cursor-foreground": "#00ff87", - "input-cursor-text-style": "underline", + "input-cursor-foreground": colors.get("input-cursor-foreground", "#00ff87"), + "input-cursor-text-style": other.get("input-cursor-text-style", "underline"), }, ) self.register_theme(BASE_THEME) self.theme = "aider" + def _get_config(self): + """ + Parse and return TUI configuration from args.tui_config. + + Returns: + dict: TUI configuration with defaults for missing values + """ + config = {} + + # Check if tui_config is provided via args + if ( + hasattr(self, "args") + and self.args + and hasattr(self.args, "tui_config") + and self.args.tui_config + ): + try: + config = json.loads(self.args.tui_config) + except (json.JSONDecodeError, TypeError) as e: + # Can't use self.io here since it doesn't exist yet + # The error will be handled elsewhere if needed + print(f"Warning: Failed to parse tui-config JSON: {e}") + # Continue with empty config, will apply defaults below + + # Ensure config has a colors entry with nested structure matching BASE_THEME + if "colors" not in config: + config["colors"] = {} + + if "other" not in config: + config["other"] = {} + + # Ensure colors dict has all expected keys with default values + default_colors = { + "primary": "#00ff5f", + "secondary": "#888888", + "accent": "#00ff87", + "foreground": "#ffffff", + "background": "#1e1e1e", + "success": "#00aa00", + "warning": "#ffd700", + "error": "#ff3333", + "surface": "transparent", + "panel": "transparent", + "dark": True, + "variables": { + "input-cursor-foreground": "#00ff87", + "input-cursor-text-style": "underline", + }, + } + + # Merge default colors with user-provided colors + for key, default_value in default_colors.items(): + if key not in 
config["colors"]: + config["colors"][key] = default_value + elif key == "variables" and isinstance(default_value, dict): + # Handle nested variables dict + if "variables" not in config["colors"]: + config["colors"]["variables"] = {} + for var_key, var_default in default_value.items(): + if var_key not in config["colors"]["variables"]: + config["colors"]["variables"][var_key] = var_default + + return config + def compose(self) -> ComposeResult: """Create child widgets.""" coder = self.worker.coder diff --git a/aider/website/docs/config/agent-mode.md b/aider/website/docs/config/agent-mode.md index 67f480849bf..c0953b755e0 100644 --- a/aider/website/docs/config/agent-mode.md +++ b/aider/website/docs/config/agent-mode.md @@ -146,9 +146,31 @@ Arguments: {} ``` ### Agent Configuration - Agent Mode can be configured using the `--agent-config` command line argument, which accepts a JSON string for fine-grained control over tool availability and behavior. +Agent Mode can also be configured directly in the relevant config.yml file: + +```yaml +agent: true +agent-config: + # Tool configuration + tools_includelist: ["view", "makeeditable", "replacetext", "finished"] # Optional: Whitelist of tools + tools_excludelist: ["command", "commandinteractive"] # Optional: Blacklist of tools + + # Context blocks configuration + include_context_blocks: ["todo_list", "git_status"] # Optional: Context blocks to include + exclude_context_blocks: ["symbol_outline", "directory_structure"] # Optional: Context blocks to exclude + + # Performance and behavior settings + large_file_token_threshold: 12500 # Token threshold for large file warnings + skip_cli_confirmations: false # YOLO mode - be brave and let the LLM cook + + # Skills configuration (see Skills documentation for details) + skills_paths: ["~/my-skills", "./project-skills"] # Directories to search for skills + skills_includelist: ["python-refactoring", "react-components"] # Optional: Whitelist of skills to include + skills_excludelist: 
["legacy-tools"] # Optional: Blacklist of skills to exclude +``` + #### Configuration Options - **`large_file_token_threshold`**: Maximum token threshold for large file warnings (default: 25000) @@ -190,8 +212,7 @@ preserve-todo-list: true use-enhanced-map: true ``` - -#### Configuration Example +#### Complete Configuration Example Complete configuration example in YAML configuration file (`.aider.conf.yml` or `~/.aider.conf.yml`): @@ -200,25 +221,23 @@ Complete configuration example in YAML configuration file (`.aider.conf.yml` or agent: true # Agent Mode configuration -agent-config: | - { - # Tool configuration - "tools_includelist": ["view", "makeeditable", "replacetext", "finished"], # Optional: Whitelist of tools - "tools_excludelist": ["command", "commandinteractive"], # Optional: Blacklist of tools - - # Context blocks configuration - "include_context_blocks": ["todo_list", "git_status"], # Optional: Context blocks to include - "exclude_context_blocks": ["symbol_outline", "directory_structure"], # Optional: Context blocks to exclude - - # Performance and behavior settings - "large_file_token_threshold": 12500, # Token threshold for large file warnings - "skip_cli_confirmations": false, # YOLO mode - be brave and let the LLM cook - - # Skills configuration (see Skills documentation for details) - "skills_paths": ["~/my-skills", "./project-skills"], # Directories to search for skills - "skills_includelist": ["python-refactoring", "react-components"], # Optional: Whitelist of skills to include - "skills_excludelist": ["legacy-tools"] # Optional: Blacklist of skills to exclude - } +agent-config: + # Tool configuration + tools_includelist: ["view", "makeeditable", "replacetext", "finished"] # Optional: Whitelist of tools + tools_excludelist: ["command", "commandinteractive"] # Optional: Blacklist of tools + + # Context blocks configuration + include_context_blocks: ["todo_list", "git_status"] # Optional: Context blocks to include + exclude_context_blocks: 
["symbol_outline", "directory_structure"] # Optional: Context blocks to exclude + + # Performance and behavior settings + large_file_token_threshold: 12500 # Token threshold for large file warnings + skip_cli_confirmations: false # YOLO mode - be brave and let the LLM cook + + # Skills configuration (see Skills documentation for details) + skills_paths: ["~/my-skills", "./project-skills"] # Directories to search for skills + skills_includelist: ["python-refactoring", "react-components"] # Optional: Whitelist of skills to include + skills_excludelist: ["legacy-tools"] # Optional: Blacklist of skills to exclude # Other Agent Mode options preserve-todo-list: true # Preserve todo list across sessions diff --git a/aider/website/docs/config/tui.md b/aider/website/docs/config/tui.md new file mode 100644 index 00000000000..49d7e43741b --- /dev/null +++ b/aider/website/docs/config/tui.md @@ -0,0 +1,87 @@ +# TUI Mode + +TUI (Textual User Interface) Mode provides a modern, visually rich terminal interface for AI pair programming. + +## Activation + +Command line: +``` +aider-ce ... --tui + +### OR! + +cecli ... --tui +``` + +## Configuration + +TUI Mode can be configured directly in the relevant config.json file or with JSON in the command line arguments: + +### Minimal Configuration + +```yaml +tui: true +``` + +### Complete Configuration Example + +Complete configuration example in YAML configuration file (`.aider.conf.yml` or `~/.aider.conf.yml`). 
The base theme is pretty nice but if you want different colors, do your thing: + +```yaml +tui: true +tui-config: + colors: + primary: "#00ff5f" + secondary: "#888888" + accent: "#00ff87" + foreground: "#ffffff" + background: "#1e1e1e" + success: "#00aa00" + warning: "#ffd700" + error: "#ff3333" + surface: "transparent" + panel: "transparent" + dark: true + variables: + input-cursor-foreground: "#00ff87" + other: + dark: true + input-cursor-text-style: "underline" + +``` + +## Benefits + +- **Improved Productivity**: Reduced context switching with all information visible at once +- **Better Organization**: Clear separation of concerns between input, output, and status +- **Enhanced Readability**: Proper formatting and syntax highlighting for code discussions +- **Real-time Feedback**: Immediate visual feedback for all operations +- **Modern Interface**: Familiar UI patterns that reduce cognitive load +- **Accessibility**: Full keyboard navigation without requiring mouse interaction + +## Integration with Other Modes + +TUI Mode works seamlessly with other aider-ce features: + +- **Agent Mode**: Visual feedback for tool calls and autonomous operations +- **Skills**: Clean display of skill outputs and interactions +- **MCP Servers**: Integrated display of MCP tool outputs +- **Git Operations**: Real-time git status updates in the footer + +## Troubleshooting + +### Common Issues + +1. **TUI not starting**: Ensure your terminal supports True Color (24-bit color) +2. **Display issues**: Try resizing your terminal window +3. **Performance problems**: Reduce terminal refresh rate or disable animations +4. 
**Input lag**: Check for conflicts with terminal multiplexers (tmux, screen) + +### Terminal Requirements + +- **True Color Support**: Required for proper color rendering +- **Minimum Size**: 80x24 terminal size recommended +- **Unicode Support**: Required for proper symbol display +- **Modern Terminal**: Recommended: Kitty, WezTerm, iTerm2, or Windows Terminal + +TUI Mode represents a significant evolution in cecli's user experience, providing a modern, efficient interface for AI pair programming while maintaining the power and flexibility of the command-line foundation. Ideally, this mode makes ai-enabled programming more colorful and more fun for us all! \ No newline at end of file From cb823d331ad31dc1f244666817419c244b8b88a7 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 23:24:32 -0500 Subject: [PATCH 22/28] Update package imports so textual exists in the core app, remove gui mode entirely since it's kinda mid --- .github/workflows/ubuntu-tests.yml | 3 +- .github/workflows/windows-tests.yml | 2 +- aider/args.py | 7 - aider/gui.py | 545 -------------------------- aider/website/docs/install/docker.md | 35 +- pyproject.toml | 1 - requirements.txt | 27 +- requirements/common-constraints.txt | 51 +-- requirements/requirements-browser.in | 1 - requirements/requirements-browser.txt | 159 -------- requirements/requirements-tui.in | 1 - requirements/requirements.in | 1 + scripts/pip-compile.sh | 2 +- tests/tools/test_grep.py | 1 + 14 files changed, 50 insertions(+), 786 deletions(-) delete mode 100755 aider/gui.py delete mode 100644 requirements/requirements-browser.in delete mode 100644 requirements/requirements-browser.txt delete mode 100644 requirements/requirements-tui.in diff --git a/.github/workflows/ubuntu-tests.yml b/.github/workflows/ubuntu-tests.yml index 66e03f9c44d..e26127489a3 100644 --- a/.github/workflows/ubuntu-tests.yml +++ b/.github/workflows/ubuntu-tests.yml @@ -51,10 +51,9 @@ jobs: pytest \ pytest-asyncio \ -r 
requirements/requirements.in \ - -r requirements/requirements-browser.in \ -r requirements/requirements-help.in \ -r requirements/requirements-playwright.in \ - ".[browser,help,playwright]" + ".[help,playwright]" - name: Run tests env: diff --git a/.github/workflows/windows-tests.yml b/.github/workflows/windows-tests.yml index 3d81acbe4ad..c5e3f12d988 100644 --- a/.github/workflows/windows-tests.yml +++ b/.github/workflows/windows-tests.yml @@ -42,7 +42,7 @@ jobs: run: | python -m pip install --upgrade pip pip install uv - uv pip install --system pytest pytest-asyncio -r requirements/requirements.in -r requirements/requirements-browser.in -r requirements/requirements-help.in -r requirements/requirements-playwright.in '.[browser,help,playwright]' + uv pip install --system pytest pytest-asyncio -r requirements/requirements.in -r requirements/requirements-help.in -r requirements/requirements-playwright.in '.[help,playwright]' - name: Run tests env: diff --git a/aider/args.py b/aider/args.py index 0f0957be1d2..bc0474299da 100644 --- a/aider/args.py +++ b/aider/args.py @@ -739,13 +739,6 @@ def get_parser(default_config_files, git_root): " (disables chat mode)" ), ).complete = shtab.FILE - group.add_argument( - "--gui", - "--browser", - action=argparse.BooleanOptionalAction, - help=argparse.SUPPRESS, - default=False, - ) group.add_argument( "--copy-paste", action=argparse.BooleanOptionalAction, diff --git a/aider/gui.py b/aider/gui.py deleted file mode 100755 index 6c5b012dc49..00000000000 --- a/aider/gui.py +++ /dev/null @@ -1,545 +0,0 @@ -#!/usr/bin/env python - -import os -import random -import sys - -import streamlit as st - -from aider import urls -from aider.coders import Coder -from aider.dump import dump # noqa: F401 -from aider.io import InputOutput -from aider.main import main as cli_main -from aider.scrape import Scraper, has_playwright - - -class CaptureIO(InputOutput): - lines = [] - - def tool_output(self, msg, log_only=False): - if not log_only: - 
self.lines.append(msg) - super().tool_output(msg, log_only=log_only) - - def tool_error(self, msg): - self.lines.append(msg) - super().tool_error(msg) - - def tool_warning(self, msg): - self.lines.append(msg) - super().tool_warning(msg) - - def get_captured_lines(self): - lines = self.lines - self.lines = [] - return lines - - -def search(text=None): - results = [] - for root, _, files in os.walk("aider"): - for file in files: - path = os.path.join(root, file) - if not text or text in path: - results.append(path) - # dump(results) - - return results - - -# Keep state as a resource, which survives browser reloads (since Coder does too) -class State: - keys = set() - - def init(self, key, val=None): - if key in self.keys: - return - - self.keys.add(key) - setattr(self, key, val) - return True - - -@st.cache_resource -def get_state(): - return State() - - -@st.cache_resource -def get_coder(): - coder = cli_main(return_coder=True) - if not isinstance(coder, Coder): - raise ValueError(coder) - if not coder.repo: - raise ValueError("GUI can currently only be used inside a git repo") - - io = CaptureIO( - pretty=False, - yes=True, - dry_run=coder.io.dry_run, - encoding=coder.io.encoding, - ) - # coder.io = io # this breaks the input_history - coder.commands.io = io - - for line in coder.get_announcements(): - coder.io.tool_output(line) - - return coder - - -class GUI: - prompt = None - prompt_as = "user" - last_undo_empty = None - recent_msgs_empty = None - web_content_empty = None - - def announce(self): - lines = self.coder.get_announcements() - lines = " \n".join(lines) - return lines - - def show_edit_info(self, edit): - commit_hash = edit.get("commit_hash") - commit_message = edit.get("commit_message") - diff = edit.get("diff") - fnames = edit.get("fnames") - if fnames: - fnames = sorted(fnames) - - if not commit_hash and not fnames: - return - - show_undo = False - res = "" - if commit_hash: - res += f"Commit `{commit_hash}`: {commit_message} \n" - if commit_hash == 
self.coder.last_aider_commit_hash: - show_undo = True - - if fnames: - fnames = [f"`{fname}`" for fname in fnames] - fnames = ", ".join(fnames) - res += f"Applied edits to {fnames}." - - if diff: - with st.expander(res): - st.code(diff, language="diff") - if show_undo: - self.add_undo(commit_hash) - else: - with st.container(border=True): - st.write(res) - if show_undo: - self.add_undo(commit_hash) - - def add_undo(self, commit_hash): - if self.last_undo_empty: - self.last_undo_empty.empty() - - self.last_undo_empty = st.empty() - undone = self.state.last_undone_commit_hash == commit_hash - if not undone: - with self.last_undo_empty: - if self.button(f"Undo commit `{commit_hash}`", key=f"undo_{commit_hash}"): - self.do_undo(commit_hash) - - def do_sidebar(self): - with st.sidebar: - st.title("Aider") - # self.cmds_tab, self.settings_tab = st.tabs(["Commands", "Settings"]) - - # self.do_recommended_actions() - self.do_add_to_chat() - self.do_recent_msgs() - self.do_clear_chat_history() - # st.container(height=150, border=False) - # st.write("### Experimental") - - st.warning( - "This browser version of aider is experimental. Please share feedback in [GitHub" - " issues](https://github.com/Aider-AI/aider/issues)." - ) - - def do_settings_tab(self): - pass - - def do_recommended_actions(self): - text = "Aider works best when your code is stored in a git repo. 
\n" - text += f"[See the FAQ for more info]({urls.git})" - - with st.expander("Recommended actions", expanded=True): - with st.popover("Create a git repo to track changes"): - st.write(text) - self.button("Create git repo", key=random.random(), help="?") - - with st.popover("Update your `.gitignore` file"): - st.write("It's best to keep aider's internal files out of your git repo.") - self.button("Add `.aider*` to `.gitignore`", key=random.random(), help="?") - - def do_add_to_chat(self): - # with st.expander("Add to the chat", expanded=True): - self.do_add_files() - self.do_add_web_page() - - def do_add_files(self): - fnames = st.multiselect( - "Add files to the chat", - self.coder.get_all_relative_files(), - default=self.state.initial_inchat_files, - placeholder="Files to edit", - disabled=self.prompt_pending(), - help=( - "Only add the files that need to be *edited* for the task you are working" - " on. Aider will pull in other relevant code to provide context to the LLM." - ), - ) - - for fname in fnames: - if fname not in self.coder.get_inchat_relative_files(): - self.coder.add_rel_fname(fname) - self.info(f"Added {fname} to the chat") - - for fname in self.coder.get_inchat_relative_files(): - if fname not in fnames: - self.coder.drop_rel_fname(fname) - self.info(f"Removed {fname} from the chat") - - def do_add_web_page(self): - with st.popover("Add a web page to the chat"): - self.do_web() - - def do_add_image(self): - with st.popover("Add image"): - st.markdown("Hello World 👋") - st.file_uploader("Image file", disabled=self.prompt_pending()) - - def do_run_shell(self): - with st.popover("Run shell commands, tests, etc"): - st.markdown( - "Run a shell command and optionally share the output with the LLM. This is" - " a great way to run your program or run tests and have the LLM fix bugs." 
- ) - st.text_input("Command:") - st.radio( - "Share the command output with the LLM?", - [ - "Review the output and decide whether to share", - "Automatically share the output on non-zero exit code (ie, if any tests fail)", - ], - ) - st.selectbox( - "Recent commands", - [ - "my_app.py --doit", - "my_app.py --cleanup", - ], - disabled=self.prompt_pending(), - ) - - def do_tokens_and_cost(self): - with st.expander("Tokens and costs", expanded=True): - pass - - def do_show_token_usage(self): - with st.popover("Show token usage"): - st.write("hi") - - def do_clear_chat_history(self): - text = "Saves tokens, reduces confusion" - if self.button("Clear chat history", help=text): - self.coder.done_messages = [] - self.coder.cur_messages = [] - self.info("Cleared chat history. Now the LLM can't see anything before this line.") - - def do_show_metrics(self): - st.metric("Cost of last message send & reply", "$0.0019", help="foo") - st.metric("Cost to send next message", "$0.0013", help="foo") - st.metric("Total cost this session", "$0.22") - - def do_git(self): - with st.expander("Git", expanded=False): - # st.button("Show last diff") - # st.button("Undo last commit") - self.button("Commit any pending changes") - with st.popover("Run git command"): - st.markdown("## Run git command") - st.text_input("git", value="git ") - self.button("Run") - st.selectbox( - "Recent git commands", - [ - "git checkout -b experiment", - "git stash", - ], - disabled=self.prompt_pending(), - ) - - def do_recent_msgs(self): - if not self.recent_msgs_empty: - self.recent_msgs_empty = st.empty() - - if self.prompt_pending(): - self.recent_msgs_empty.empty() - self.state.recent_msgs_num += 1 - - with self.recent_msgs_empty: - self.old_prompt = st.selectbox( - "Resend a recent chat message", - self.state.input_history, - placeholder="Choose a recent chat message", - # label_visibility="collapsed", - index=None, - key=f"recent_msgs_{self.state.recent_msgs_num}", - disabled=self.prompt_pending(), - ) 
- if self.old_prompt: - self.prompt = self.old_prompt - - def do_messages_container(self): - self.messages = st.container() - - # stuff a bunch of vertical whitespace at the top - # to get all the chat text to the bottom - # self.messages.container(height=300, border=False) - - with self.messages: - for msg in self.state.messages: - role = msg["role"] - - if role == "edit": - self.show_edit_info(msg) - elif role == "info": - st.info(msg["content"]) - elif role == "text": - text = msg["content"] - line = text.splitlines()[0] - with self.messages.expander(line): - st.text(text) - elif role in ("user", "assistant"): - with st.chat_message(role): - st.write(msg["content"]) - # self.cost() - else: - st.dict(msg) - - def initialize_state(self): - messages = [ - dict(role="info", content=self.announce()), - dict(role="assistant", content="How can I help you?"), - ] - - self.state.init("messages", messages) - self.state.init("last_aider_commit_hash", self.coder.last_aider_commit_hash) - self.state.init("last_undone_commit_hash") - self.state.init("recent_msgs_num", 0) - self.state.init("web_content_num", 0) - self.state.init("prompt") - self.state.init("scraper") - - self.state.init("initial_inchat_files", self.coder.get_inchat_relative_files()) - - if "input_history" not in self.state.keys: - input_history = list(self.coder.io.get_input_history()) - seen = set() - input_history = [x for x in input_history if not (x in seen or seen.add(x))] - self.state.input_history = input_history - self.state.keys.add("input_history") - - def button(self, args, **kwargs): - "Create a button, disabled if prompt pending" - - # Force everything to be disabled if there is a prompt pending - if self.prompt_pending(): - kwargs["disabled"] = True - - return st.button(args, **kwargs) - - def __init__(self): - self.coder = get_coder() - self.state = get_state() - - # Force the coder to cooperate, regardless of cmd line args - self.coder.yield_stream = True - self.coder.stream = True - 
self.coder.pretty = False - - self.initialize_state() - - self.do_messages_container() - self.do_sidebar() - - user_inp = st.chat_input("Say something") - if user_inp: - self.prompt = user_inp - - if self.prompt_pending(): - self.process_chat() - - if not self.prompt: - return - - self.state.prompt = self.prompt - - if self.prompt_as == "user": - self.coder.io.add_to_input_history(self.prompt) - - self.state.input_history.append(self.prompt) - - if self.prompt_as: - self.state.messages.append({"role": self.prompt_as, "content": self.prompt}) - if self.prompt_as == "user": - with self.messages.chat_message("user"): - st.write(self.prompt) - elif self.prompt_as == "text": - line = self.prompt.splitlines()[0] - line += "??" - with self.messages.expander(line): - st.text(self.prompt) - - # re-render the UI for the prompt_pending state - st.rerun() - - def prompt_pending(self): - return self.state.prompt is not None - - def cost(self): - cost = random.random() * 0.003 + 0.001 - st.caption(f"${cost:0.4f}") - - def process_chat(self): - prompt = self.state.prompt - self.state.prompt = None - - # This duplicates logic from within Coder - self.num_reflections = 0 - self.max_reflections = 3 - - while prompt: - with self.messages.chat_message("assistant"): - res = st.write_stream(self.coder.run_stream(prompt)) - self.state.messages.append({"role": "assistant", "content": res}) - # self.cost() - - prompt = None - if self.coder.reflected_message: - if self.num_reflections < self.max_reflections: - self.num_reflections += 1 - self.info(self.coder.reflected_message) - prompt = self.coder.reflected_message - - with self.messages: - edit = dict( - role="edit", - fnames=self.coder.aider_edited_files, - ) - if self.state.last_aider_commit_hash != self.coder.last_aider_commit_hash: - edit["commit_hash"] = self.coder.last_aider_commit_hash - edit["commit_message"] = self.coder.last_aider_commit_message - commits = f"{self.coder.last_aider_commit_hash}~1" - diff = 
self.coder.repo.diff_commits( - self.coder.pretty, - commits, - self.coder.last_aider_commit_hash, - ) - edit["diff"] = diff - self.state.last_aider_commit_hash = self.coder.last_aider_commit_hash - - self.state.messages.append(edit) - self.show_edit_info(edit) - - # re-render the UI for the non-prompt_pending state - st.rerun() - - def info(self, message, echo=True): - info = dict(role="info", content=message) - self.state.messages.append(info) - - # We will render the tail of the messages array after this call - if echo: - self.messages.info(message) - - def do_web(self): - st.markdown("Add the text content of a web page to the chat") - - if not self.web_content_empty: - self.web_content_empty = st.empty() - - if self.prompt_pending(): - self.web_content_empty.empty() - self.state.web_content_num += 1 - - with self.web_content_empty: - self.web_content = st.text_input( - "URL", - placeholder="https://...", - key=f"web_content_{self.state.web_content_num}", - ) - - if not self.web_content: - return - - url = self.web_content - - if not self.state.scraper: - self.scraper = Scraper(print_error=self.info, playwright_available=has_playwright()) - - content = self.scraper.scrape(url) or "" - if content.strip(): - content = f"{url}\n\n" + content - self.prompt = content - self.prompt_as = "text" - else: - self.info(f"No web content found for `{url}`.") - self.web_content = None - - def do_undo(self, commit_hash): - self.last_undo_empty.empty() - - if ( - self.state.last_aider_commit_hash != commit_hash - or self.coder.last_aider_commit_hash != commit_hash - ): - self.info(f"Commit `{commit_hash}` is not the latest commit.") - return - - self.coder.commands.io.get_captured_lines() - reply = self.coder.commands.cmd_undo(None) - lines = self.coder.commands.io.get_captured_lines() - - lines = "\n".join(lines) - lines = lines.splitlines() - lines = " \n".join(lines) - self.info(lines, echo=False) - - self.state.last_undone_commit_hash = commit_hash - - if reply: - 
self.prompt_as = None - self.prompt = reply - - -def gui_main(): - st.set_page_config( - layout="wide", - page_title="Aider", - page_icon=urls.favicon, - menu_items={ - "Get Help": urls.website, - "Report a bug": "https://github.com/Aider-AI/aider/issues", - "About": "# Aider\nAI pair programming in your browser.", - }, - ) - - # config_options = st.config._config_options - # for key, value in config_options.items(): - # print(f"{key}: {value.value}") - - GUI() - - -if __name__ == "__main__": - status = gui_main() - sys.exit(status) diff --git a/aider/website/docs/install/docker.md b/aider/website/docs/install/docker.md index f63a413c4ad..cea11721761 100644 --- a/aider/website/docs/install/docker.md +++ b/aider/website/docs/install/docker.md @@ -3,32 +3,24 @@ parent: Installation nav_order: 100 --- -# Aider with docker +# cecli with docker -Aider is available as 2 docker images: +Cecli is available as a docker image: -- `paulgauthier/aider` installs the aider core, a smaller image that's good to get started quickly. -- `paulgauthier/aider-full` installs aider will all the optional extras. - -The full image has support for features like interactive help, the -browser GUI and support for using Playwright to scrape web pages. The -core image can still use these features, but they will need to be -installed the first time you access them. Since containers are -ephemeral, the extras will need to be reinstalled the next time you -launch the aider core container. +- `dustinwashington/aider-ce` installs the cecli core, a smaller image that's good to get started quickly. ### Aider core -``` -docker pull paulgauthier/aider -docker run -it --user $(id -u):$(id -g) --volume $(pwd):/app paulgauthier/aider --openai-api-key $OPENAI_API_KEY [...other aider args...] -``` - -### Full version - -``` -docker pull paulgauthier/aider-full -docker run -it --user $(id -u):$(id -g) --volume $(pwd):/app paulgauthier/aider-full --openai-api-key $OPENAI_API_KEY [...other aider args...] 
+```bash +docker pull dustinwashington/aider-ce +docker run \ + -it \ + --user $(id -u):$(id -g) \ + --volume $(pwd):/app dustinwashington/aider-ce \ + --volume $(pwd)/.aider.conf.yml:/.aider.conf.yml \ + --volume $(pwd)/.aider.env:/.aider/.env \ + [...other args...] \ + --config /app/.aider.conf.yml ``` ## How to use it @@ -50,7 +42,6 @@ git config user.email "you@example.com" git config user.name "Your Name" ``` - ## Limitations - When you use the in-chat `/run` command, it will be running shell commands *inside the docker container*. So those commands won't be running in your local environment, which may make it tricky to `/run` tests, etc for your project. diff --git a/pyproject.toml b/pyproject.toml index d43fbe87e2d..a41dd027c94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,6 @@ dependencies = { file = "requirements/requirements.in" } [tool.setuptools.dynamic.optional-dependencies] dev = { file = "requirements/requirements-dev.in" } help = { file = "requirements/requirements-help.in" } -browser = { file = "requirements/requirements-browser.in" } playwright = { file = "requirements/requirements-playwright.in" } tui = { file = "requirements/requirements-tui.in" } diff --git a/requirements.txt b/requirements.txt index 45df632bcf2..75b3f015291 100644 --- a/requirements.txt +++ b/requirements.txt @@ -231,14 +231,20 @@ jsonschema-specifications==2025.9.1 # via # -c requirements/common-constraints.txt # jsonschema +linkify-it-py==2.0.3 + # via + # -c requirements/common-constraints.txt + # markdown-it-py litellm==1.80.5 # via # -c requirements/common-constraints.txt # -r requirements/requirements.in -markdown-it-py==4.0.0 +markdown-it-py[linkify]==4.0.0 # via # -c requirements/common-constraints.txt + # mdit-py-plugins # rich + # textual markupsafe==3.0.3 # via # -c requirements/common-constraints.txt @@ -251,6 +257,10 @@ mcp==1.22.0 # via # -c requirements/common-constraints.txt # -r requirements/requirements.in +mdit-py-plugins==0.5.0 + # via + # 
-c requirements/common-constraints.txt + # textual mdurl==0.1.2 # via # -c requirements/common-constraints.txt @@ -299,6 +309,10 @@ pillow==12.0.0 # via # -c requirements/common-constraints.txt # -r requirements/requirements.in +platformdirs==4.5.0 + # via + # -c requirements/common-constraints.txt + # textual prompt-toolkit==3.0.52 # via # -c requirements/common-constraints.txt @@ -375,6 +389,7 @@ pygments==2.19.2 # via # -c requirements/common-constraints.txt # rich + # textual pyjwt[crypto]==2.10.1 # via # -c requirements/common-constraints.txt @@ -424,6 +439,7 @@ rich==14.2.0 # via # -c requirements/common-constraints.txt # -r requirements/requirements.in + # textual rpds-py==0.29.0 # via # -c requirements/common-constraints.txt @@ -474,6 +490,10 @@ starlette==0.50.0 # via # -c requirements/common-constraints.txt # mcp +textual==6.8.0 + # via + # -c requirements/common-constraints.txt + # -r requirements/requirements.in tiktoken==0.12.0 # via # -c requirements/common-constraints.txt @@ -527,6 +547,7 @@ typing-extensions==4.15.0 # pydantic-core # referencing # starlette + # textual # typing-inspection typing-inspection==0.4.2 # via @@ -534,6 +555,10 @@ typing-inspection==0.4.2 # mcp # pydantic # pydantic-settings +uc-micro-py==1.0.3 + # via + # -c requirements/common-constraints.txt + # linkify-it-py uritemplate==4.2.0 # via # -c requirements/common-constraints.txt diff --git a/requirements/common-constraints.txt b/requirements/common-constraints.txt index e41bd38f4fc..900a557cee1 100644 --- a/requirements/common-constraints.txt +++ b/requirements/common-constraints.txt @@ -1,5 +1,5 @@ # This file was autogenerated by uv via the following command: -# uv pip compile --no-strip-extras --output-file=requirements/common-constraints.txt requirements/requirements.in requirements/requirements-browser.in requirements/requirements-dev.in requirements/requirements-help.in requirements/requirements-playwright.in requirements/requirements-tui.in +# uv pip compile 
--no-strip-extras --output-file=requirements/common-constraints.txt requirements/requirements.in requirements/requirements-dev.in requirements/requirements-help.in requirements/requirements-playwright.in aiohappyeyeballs==2.6.1 # via aiohttp aiohttp==3.13.2 @@ -11,8 +11,6 @@ aiosignal==1.4.0 # via aiohttp aiosqlite==0.21.0 # via llama-index-core -altair==5.5.0 - # via streamlit annotated-types==0.7.0 # via pydantic anyio==4.11.0 @@ -34,14 +32,10 @@ banks==2.2.0 # via llama-index-core beautifulsoup4==4.14.2 # via -r requirements/requirements.in -blinker==1.9.0 - # via streamlit build==1.3.0 # via pip-tools cachetools==6.2.2 - # via - # google-auth - # streamlit + # via google-auth certifi==2025.11.12 # via # httpcore @@ -61,7 +55,6 @@ click==8.3.1 # litellm # nltk # pip-tools - # streamlit # typer # uvicorn codespell==2.4.1 @@ -121,9 +114,7 @@ fsspec==2025.10.0 gitdb==4.0.12 # via gitpython gitpython==3.1.45 - # via - # -r requirements/requirements.in - # streamlit + # via -r requirements/requirements.in google-ai-generativelanguage==0.6.15 # via google-generativeai google-api-core[grpc]==2.28.1 @@ -220,10 +211,8 @@ iniconfig==2.3.0 # via pytest jinja2==3.1.6 # via - # altair # banks # litellm - # pydeck # torch jiter==0.12.0 # via openai @@ -236,7 +225,6 @@ json5==0.12.1 jsonschema==4.25.1 # via # -r requirements/requirements.in - # altair # litellm # mcp jsonschema-specifications==2025.9.1 @@ -286,8 +274,6 @@ multidict==6.7.0 # yarl mypy-extensions==1.1.0 # via typing-inspect -narwhals==2.12.0 - # via altair nest-asyncio==1.6.0 # via llama-index-core networkx==3.6 @@ -306,11 +292,9 @@ numpy==2.3.5 # llama-index-core # matplotlib # pandas - # pydeck # scikit-learn # scipy # soundfile - # streamlit # transformers nvidia-cublas-cu12==12.8.4.1 # via @@ -358,19 +342,15 @@ oslex==0.1.3 packaging==25.0 # via # -r requirements/requirements.in - # altair # build # google-cloud-bigquery # huggingface-hub # marshmallow # matplotlib # pytest - # streamlit # transformers 
pandas==2.3.3 - # via - # -r requirements/requirements-dev.in - # streamlit + # via -r requirements/requirements-dev.in pathspec==0.12.1 # via # -r requirements/requirements.in @@ -383,7 +363,6 @@ pillow==12.0.0 # llama-index-core # matplotlib # sentence-transformers - # streamlit pip==25.3 # via pip-tools pip-tools==7.5.2 @@ -418,13 +397,10 @@ protobuf==5.29.5 # googleapis-common-protos # grpcio-status # proto-plus - # streamlit psutil==7.1.3 # via -r requirements/requirements.in ptyprocess==0.7.0 # via pexpect -pyarrow==21.0.0 - # via streamlit pyasn1==0.6.1 # via # pyasn1-modules @@ -450,8 +426,6 @@ pydantic-core==2.41.5 # via pydantic pydantic-settings==2.12.0 # via mcp -pydeck==0.9.1 - # via streamlit pydub==0.25.1 # via -r requirements/requirements.in pyee==13.0.0 @@ -521,7 +495,6 @@ requests==2.32.5 # google-cloud-bigquery # huggingface-hub # llama-index-core - # streamlit # tiktoken # transformers rich==14.2.0 @@ -579,16 +552,12 @@ sse-starlette==3.0.3 # via mcp starlette==0.50.0 # via mcp -streamlit==1.51.0 - # via -r requirements/requirements-browser.in sympy==1.14.0 # via torch tenacity==9.1.2 - # via - # llama-index-core - # streamlit + # via llama-index-core textual==6.8.0 - # via -r requirements/requirements-tui.in + # via -r requirements/requirements.in threadpoolctl==3.6.0 # via scikit-learn tiktoken==0.12.0 @@ -599,12 +568,8 @@ tokenizers==0.22.1 # via # litellm # transformers -toml==0.10.2 - # via streamlit torch==2.9.1 # via sentence-transformers -tornado==6.5.2 - # via streamlit tqdm==4.67.1 # via # google-generativeai @@ -638,7 +603,6 @@ typing-extensions==4.15.0 # via # aiosignal # aiosqlite - # altair # anyio # beautifulsoup4 # google-generativeai @@ -656,7 +620,6 @@ typing-extensions==4.15.0 # sentence-transformers # sqlalchemy # starlette - # streamlit # textual # torch # typer @@ -685,8 +648,6 @@ uvicorn==0.38.0 # via mcp virtualenv==20.35.4 # via pre-commit -watchdog==6.0.0 - # via streamlit watchfiles==1.1.1 # via -r 
requirements/requirements.in wcwidth==0.2.14 diff --git a/requirements/requirements-browser.in b/requirements/requirements-browser.in deleted file mode 100644 index 12a4706528d..00000000000 --- a/requirements/requirements-browser.in +++ /dev/null @@ -1 +0,0 @@ -streamlit diff --git a/requirements/requirements-browser.txt b/requirements/requirements-browser.txt deleted file mode 100644 index c3eba12883d..00000000000 --- a/requirements/requirements-browser.txt +++ /dev/null @@ -1,159 +0,0 @@ -# This file was autogenerated by uv via the following command: -# uv pip compile --no-strip-extras --constraint=requirements/common-constraints.txt --output-file=requirements/requirements-browser.txt requirements/requirements-browser.in -altair==5.5.0 - # via - # -c requirements/common-constraints.txt - # streamlit -attrs==25.4.0 - # via - # -c requirements/common-constraints.txt - # jsonschema - # referencing -blinker==1.9.0 - # via - # -c requirements/common-constraints.txt - # streamlit -cachetools==6.2.2 - # via - # -c requirements/common-constraints.txt - # streamlit -certifi==2025.11.12 - # via - # -c requirements/common-constraints.txt - # requests -charset-normalizer==3.4.4 - # via - # -c requirements/common-constraints.txt - # requests -click==8.3.1 - # via - # -c requirements/common-constraints.txt - # streamlit -gitdb==4.0.12 - # via - # -c requirements/common-constraints.txt - # gitpython -gitpython==3.1.45 - # via - # -c requirements/common-constraints.txt - # streamlit -idna==3.11 - # via - # -c requirements/common-constraints.txt - # requests -jinja2==3.1.6 - # via - # -c requirements/common-constraints.txt - # altair - # pydeck -jsonschema==4.25.1 - # via - # -c requirements/common-constraints.txt - # altair -jsonschema-specifications==2025.9.1 - # via - # -c requirements/common-constraints.txt - # jsonschema -markupsafe==3.0.3 - # via - # -c requirements/common-constraints.txt - # jinja2 -narwhals==2.12.0 - # via - # -c requirements/common-constraints.txt - # 
altair -numpy==2.3.5 - # via - # -c requirements/common-constraints.txt - # pandas - # pydeck - # streamlit -packaging==25.0 - # via - # -c requirements/common-constraints.txt - # altair - # streamlit -pandas==2.3.3 - # via - # -c requirements/common-constraints.txt - # streamlit -pillow==12.0.0 - # via - # -c requirements/common-constraints.txt - # streamlit -protobuf==5.29.5 - # via - # -c requirements/common-constraints.txt - # streamlit -pyarrow==21.0.0 - # via - # -c requirements/common-constraints.txt - # streamlit -pydeck==0.9.1 - # via - # -c requirements/common-constraints.txt - # streamlit -python-dateutil==2.9.0.post0 - # via - # -c requirements/common-constraints.txt - # pandas -pytz==2025.2 - # via - # -c requirements/common-constraints.txt - # pandas -referencing==0.37.0 - # via - # -c requirements/common-constraints.txt - # jsonschema - # jsonschema-specifications -requests==2.32.5 - # via - # -c requirements/common-constraints.txt - # streamlit -rpds-py==0.29.0 - # via - # -c requirements/common-constraints.txt - # jsonschema - # referencing -six==1.17.0 - # via - # -c requirements/common-constraints.txt - # python-dateutil -smmap==5.0.2 - # via - # -c requirements/common-constraints.txt - # gitdb -streamlit==1.51.0 - # via - # -c requirements/common-constraints.txt - # -r requirements/requirements-browser.in -tenacity==9.1.2 - # via - # -c requirements/common-constraints.txt - # streamlit -toml==0.10.2 - # via - # -c requirements/common-constraints.txt - # streamlit -tornado==6.5.2 - # via - # -c requirements/common-constraints.txt - # streamlit -typing-extensions==4.15.0 - # via - # -c requirements/common-constraints.txt - # altair - # referencing - # streamlit -tzdata==2025.2 - # via - # -c requirements/common-constraints.txt - # pandas -urllib3==2.5.0 - # via - # -c requirements/common-constraints.txt - # requests -watchdog==6.0.0 - # via - # -c requirements/common-constraints.txt - # streamlit diff --git a/requirements/requirements-tui.in 
b/requirements/requirements-tui.in deleted file mode 100644 index d649decc98c..00000000000 --- a/requirements/requirements-tui.in +++ /dev/null @@ -1 +0,0 @@ -textual>=0.50.0 diff --git a/requirements/requirements.in b/requirements/requirements.in index 001f665813f..d5195b87d9c 100644 --- a/requirements/requirements.in +++ b/requirements/requirements.in @@ -29,6 +29,7 @@ shtab>=1.7.2 oslex>=0.1.3 google-generativeai>=0.8.5 mcp>=1.12.3 +textual>=6.0.0 truststore # The proper dependency is networkx[default], but this brings diff --git a/scripts/pip-compile.sh b/scripts/pip-compile.sh index e1e1e512bdd..dfcf91ca4ef 100755 --- a/scripts/pip-compile.sh +++ b/scripts/pip-compile.sh @@ -30,7 +30,7 @@ grep -v ^tree-sitter= tmp.requirements.txt \ > requirements.txt # Compile additional requirements files -SUFFIXES=(dev help browser playwright) +SUFFIXES=(dev help playwright) for SUFFIX in "${SUFFIXES[@]}"; do uv pip compile \ diff --git a/tests/tools/test_grep.py b/tests/tools/test_grep.py index 207301ebb21..67ea4938db2 100644 --- a/tests/tools/test_grep.py +++ b/tests/tools/test_grep.py @@ -31,6 +31,7 @@ def test_dash_prefixed_pattern_is_searched_literally(search_term, tmp_path, monk ), verbose=False, root=str(tmp_path), + tui=lambda: None, ) monkeypatch.setattr(grep.Tool, "_find_search_tool", lambda: ("rg", shutil.which("rg"))) From 0d96a70890a16510c7865241165a7acdfec99b88 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 23:28:41 -0500 Subject: [PATCH 23/28] Bump Version --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 60d74603d5a..ce04b1d2e50 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.89.7.dev" +__version__ = "0.90.0.dev" safe_version = __version__ try: From 55c769cc696d78abaa9ae0e43dfd75ac4edf6793 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 14 Dec 2025 23:33:18 -0500 Subject: 
[PATCH 24/28] TUI Update: Placeholder text --- aider/tui/widgets/input_area.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/tui/widgets/input_area.py b/aider/tui/widgets/input_area.py index 6d489a9a7bf..69451513d45 100644 --- a/aider/tui/widgets/input_area.py +++ b/aider/tui/widgets/input_area.py @@ -54,7 +54,7 @@ def __init__(self, history_file: str = None, **kwargs): # Let's assume kwargs might handle it or we set it. # Actually, let's just set the default if it's empty. if not self.placeholder: - self.placeholder = "> Type your message... (ctrl+s to send, enter for new line)" + self.placeholder = "> Type your message... (ctrl+s to submit, enter for new line)" self.files = [] self.commands = [] From 772bfbac59078d60d725b41ecfa9e065e8236bd3 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Mon, 15 Dec 2025 00:53:32 -0500 Subject: [PATCH 25/28] #256: Advanced model setting with configuration and tag suffixes --- aider/args.py | 17 + aider/main.py | 153 ++- aider/models.py | 63 +- aider/website/docs/config/model-aliases.md | 76 +- tests/basic/test_models.py | 1376 +++++++++++--------- 5 files changed, 1048 insertions(+), 637 deletions(-) diff --git a/aider/args.py b/aider/args.py index bc0474299da..077706d5600 100644 --- a/aider/args.py +++ b/aider/args.py @@ -119,6 +119,23 @@ def get_parser(default_config_files, git_root): metavar="ALIAS:MODEL", help="Add a model alias (can be used multiple times)", ) + group.add_argument( + "--model-overrides", + metavar="MODEL_OVERRIDES_JSON", + help=( + 'Specify model tag overrides directly as JSON/YAML string (e.g., \'{"gpt-4o": {"high":' + ' {"temperature": 0.8}}}\')' + ), + default=None, + ) + group.add_argument( + "--model-overrides-file", + metavar="MODEL_OVERRIDES_FILE", + default=".aider.model.overrides.yml", + help=( + "Specify a file with model tag overrides (e.g., gpt-4o:high -> reasoning_effort: high)" + ), + ).complete = shtab.FILE group.add_argument( "--reasoning-effort", type=str, 
diff --git a/aider/main.py b/aider/main.py index 20eea8f9dac..1d0a1161326 100644 --- a/aider/main.py +++ b/aider/main.py @@ -400,6 +400,80 @@ def register_litellm_models(git_root, model_metadata_fname, io, verbose=False): return 1 +def load_model_overrides(git_root, model_overrides_fname, io, verbose=False): + """Load model tag overrides from a YAML file.""" + from pathlib import Path + + import yaml + + model_overrides_files = generate_search_path_list( + ".aider.model.overrides.yml", git_root, model_overrides_fname + ) + + overrides = {} + files_loaded = [] + + for fname in model_overrides_files: + try: + if Path(fname).exists(): + with open(fname, "r") as f: + content = yaml.safe_load(f) + if content: + # Merge overrides, later files override earlier ones + for model_name, tags in content.items(): + if model_name not in overrides: + overrides[model_name] = {} + overrides[model_name].update(tags) + files_loaded.append(fname) + except Exception as e: + io.tool_error(f"Error loading model overrides from {fname}: {e}") + + if len(files_loaded) > 0 and verbose: + io.tool_output("Loaded model overrides from:") + for file_loaded in files_loaded: + io.tool_output(f" - {file_loaded}") + + if ( + model_overrides_fname + and model_overrides_fname not in files_loaded + and model_overrides_fname != ".aider.model.overrides.yml" + ): + io.tool_warning(f"Model Overrides File Not Found: {model_overrides_fname}") + + return overrides + + +def load_model_overrides_from_string(model_overrides_str, io): + """Load model tag overrides from a JSON/YAML string.""" + import json + + import yaml + + overrides = {} + + if not model_overrides_str: + return overrides + + try: + # First try to parse as JSON + try: + content = json.loads(model_overrides_str) + except json.JSONDecodeError: + # If JSON fails, try YAML + content = yaml.safe_load(model_overrides_str) + + if content and isinstance(content, dict): + for model_name, tags in content.items(): + if model_name not in overrides: + 
overrides[model_name] = {} + overrides[model_name].update(tags) + + return overrides + except Exception as e: + io.tool_error(f"Error parsing model overrides string: {e}") + return {} + + async def sanity_check_repo(repo, io): if not repo: return True @@ -867,10 +941,76 @@ def get_io(pretty): return await graceful_exit(None, 1) args.model = selected_model_name # Update args with the selected model + # Load model overrides if specified + model_overrides = {} + + # First load from file if specified + if args.model_overrides_file: + model_overrides = load_model_overrides( + git_root, args.model_overrides_file, io, verbose=args.verbose + ) + + # Then load from direct JSON/YAML string if specified (overrides file) + if args.model_overrides: + direct_overrides = load_model_overrides_from_string(args.model_overrides, io) + # Merge direct overrides with file overrides (direct takes precedence) + for model_name, tags in direct_overrides.items(): + if model_name not in model_overrides: + model_overrides[model_name] = {} + model_overrides[model_name].update(tags) + + # Parse model names with suffixes and apply overrides + def parse_model_with_suffix(model_name, overrides): + """Parse model name with optional :suffix and apply overrides.""" + if not model_name: + return model_name, {} + + # Split on last colon to get model name and suffix + if ":" in model_name: + base_model, suffix = model_name.rsplit(":", 1) + else: + base_model, suffix = model_name, None + + # Apply overrides if suffix exists + override_kwargs = {} + if suffix and base_model in overrides and suffix in overrides[base_model]: + override_kwargs = overrides[base_model][suffix].copy() + + return base_model, override_kwargs + + # Parse main model + main_model_name, main_model_overrides = parse_model_with_suffix(args.model, model_overrides) + weak_model_name, weak_model_overrides = parse_model_with_suffix( + args.weak_model, model_overrides + ) + editor_model_name, editor_model_overrides = parse_model_with_suffix( 
+ args.editor_model, model_overrides + ) + + # Create weak model if specified with overrides + weak_model_obj = None + if weak_model_name: + weak_model_obj = models.Model( + weak_model_name, + weak_model=False, + verbose=args.verbose, + override_kwargs=weak_model_overrides, + ) + + # Create editor model if specified with overrides + editor_model_obj = None + if editor_model_name: + editor_model_obj = models.Model( + editor_model_name, + editor_model=False, + verbose=args.verbose, + override_kwargs=editor_model_overrides, + ) + # Check if an OpenRouter model was selected/specified but the key is missing - if args.model.startswith("openrouter/") and not os.environ.get("OPENROUTER_API_KEY"): + if main_model_name.startswith("openrouter/") and not os.environ.get("OPENROUTER_API_KEY"): io.tool_warning( - f"The specified model '{args.model}' requires an OpenRouter API key, which was not" + f"The specified model '{main_model_name}' requires an OpenRouter API key, which was not" " found." ) # Attempt OAuth flow because the specific model needs it @@ -891,7 +1031,7 @@ def get_io(pretty): else: # OAuth failed or was declined by the user io.tool_error( - f"Unable to proceed without an OpenRouter API key for model '{args.model}'." + f"Unable to proceed without an OpenRouter API key for model '{main_model_name}'." 
) await io.offer_url( urls.models_and_keys, "Open documentation URL for more info?", acknowledge=True @@ -899,11 +1039,12 @@ def get_io(pretty): return await graceful_exit(None, 1) main_model = models.Model( - args.model, - weak_model=args.weak_model, - editor_model=args.editor_model, + main_model_name, + weak_model=weak_model_obj, + editor_model=editor_model_obj, editor_edit_format=args.editor_edit_format, verbose=args.verbose, + override_kwargs=main_model_overrides, ) # Check if deprecated remove_reasoning is set diff --git a/aider/models.py b/aider/models.py index ac2b9925c37..a3bc3024539 100644 --- a/aider/models.py +++ b/aider/models.py @@ -311,13 +311,20 @@ def fetch_openrouter_model_info(self, model): class Model(ModelSettings): def __init__( - self, model, weak_model=None, editor_model=None, editor_edit_format=None, verbose=False + self, + model, + weak_model=None, + editor_model=None, + editor_edit_format=None, + verbose=False, + override_kwargs=None, ): # Map any alias to its canonical name model = MODEL_ALIASES.get(model, model) self.name = model self.verbose = verbose + self.override_kwargs = override_kwargs or {} self.max_chat_history_tokens = 1024 self.weak_model = None @@ -341,10 +348,7 @@ def __init__( self.max_chat_history_tokens = min(max(max_input_tokens / 16, 1024), 8192) self.configure_model_settings(model) - if weak_model is False: - self.weak_model_name = None - else: - self.get_weak_model(weak_model) + self.get_weak_model(weak_model) if editor_model is False: self.editor_model_name = None @@ -413,6 +417,21 @@ def configure_model_settings(self, model): if "reasoning_effort" not in self.accepts_settings: self.accepts_settings.append("reasoning_effort") + # Apply override kwargs from model-overrides configuration + if self.override_kwargs: + # Initialize extra_params if it doesn't exist + if not self.extra_params: + self.extra_params = {} + + # Deep merge the override kwargs + for key, value in self.override_kwargs.items(): + if 
isinstance(value, dict) and isinstance(self.extra_params.get(key), dict): + # For nested dicts, merge recursively + self.extra_params[key] = {**self.extra_params[key], **value} + else: + # For non-dict values, simply update + self.extra_params[key] = value + def apply_generic_model_settings(self, model): if "/o3-mini" in model: self.edit_format = "diff" @@ -571,10 +590,22 @@ def apply_generic_model_settings(self, model): def __str__(self): return self.name - def get_weak_model(self, provided_weak_model_name): - # If weak_model_name is provided, override the model settings - if provided_weak_model_name: - self.weak_model_name = provided_weak_model_name + def get_weak_model(self, provided_weak_model): + # If provided_weak_model is False, set weak_model to self + if provided_weak_model is False: + self.weak_model = self + self.weak_model_name = None + return + + # If provided_weak_model is already a Model object, use it directly + if isinstance(provided_weak_model, Model): + self.weak_model = provided_weak_model + self.weak_model_name = provided_weak_model.name + return + + # If weak_model_name is provided as a string, override the model settings + if provided_weak_model: + self.weak_model_name = provided_weak_model if not self.weak_model_name: self.weak_model = self @@ -593,10 +624,16 @@ def get_weak_model(self, provided_weak_model_name): def commit_message_models(self): return [self.weak_model, self] - def get_editor_model(self, provided_editor_model_name, editor_edit_format): - # If editor_model_name is provided, override the model settings - if provided_editor_model_name: - self.editor_model_name = provided_editor_model_name + def get_editor_model(self, provided_editor_model, editor_edit_format): + # If provided_editor_model is already a Model object, use it directly + if isinstance(provided_editor_model, Model): + self.editor_model = provided_editor_model + self.editor_model_name = provided_editor_model.name + else: + # If editor_model_name is provided as a 
string, override the model settings + if provided_editor_model: + self.editor_model_name = provided_editor_model + if editor_edit_format: self.editor_edit_format = editor_edit_format diff --git a/aider/website/docs/config/model-aliases.md b/aider/website/docs/config/model-aliases.md index c27b34da002..f87542ee60e 100644 --- a/aider/website/docs/config/model-aliases.md +++ b/aider/website/docs/config/model-aliases.md @@ -13,7 +13,7 @@ Model aliases allow you to create shorthand names for models you frequently use. You can define aliases when launching aider using the `--alias` option: ```bash -aider --alias "fast:gpt-4o-mini" --alias "smart:o3-mini" +aider --alias "fast:gpt-5-mini" --alias "smart:o3-mini" ``` Multiple aliases can be defined by using the `--alias` option multiple times. Each alias definition should be in the format `alias:model-name`. @@ -25,7 +25,7 @@ you can also define aliases in your [`.aider.conf.yml` file](https://aider.chat/ ```yaml alias: - - "fast:gpt-4o-mini" + - "fast:gpt-5-mini" - "smart:o3-mini" - "hacker:claude-3-sonnet-20240229" ``` @@ -35,7 +35,7 @@ alias: Once defined, you can use the alias instead of the full model name from the command line: ```bash -aider --model fast # Uses gpt-4o-mini +aider --model fast # Uses gpt-5-mini aider --model smart # Uses o3-mini ``` @@ -51,7 +51,7 @@ Repo-map: using 4096 tokens, files refresh > /model fast Aider v0.75.3 -Main model: gpt-4o-mini with diff edit format +Main model: gpt-5-mini with diff edit format ───────────────────────────────────────────────────────────────────────────────────────────────────── diff> /model smart @@ -78,6 +78,7 @@ for alias, model in sorted(MODEL_ALIASES.items()): - `4`: gpt-4-0613 - `4-turbo`: gpt-4-1106-preview - `4o`: gpt-4o +- `5`: gpt-5 - `deepseek`: deepseek/deepseek-chat - `flash`: gemini/gemini-2.5-flash - `flash-lite`: gemini/gemini-2.5-flash-lite @@ -94,6 +95,71 @@ for alias, model in sorted(MODEL_ALIASES.items()): - `sonnet`: 
anthropic/claude-sonnet-4-20250514 +## Advanced Model Settings + +Aider-CE/Cecli supports model names with colon-separated suffixes (e.g., `gpt-5:high`) that map to additional configuration parameters defined in the relevant config.yml file. This allows you to create named configurations for different use cases. These configurations map precisely to the LiteLLM `completion()` method parameters [here](https://docs.litellm.ai/docs/completion/input), though more are supported for specific models and providers. + +### Configuration File + +Add a structure like the following to your config.yml file or create a `.aider.model.overrides.yml` file (or specify a different file with `--model-overrides-file` if there are global defaults you want): + +```yaml +model-overrides: + gpt-5: + high: # Use with: --model gpt-5:high + temperature: 0.8 + top_p: 0.9 + extra_body: + reasoning_effort: high + low: # Use with: --model gpt-5:low + temperature: 0.2 + top_p: 0.5 + creative: # Use with: --model gpt-5:creative + temperature: 0.9 + top_p: 0.95 + frequency_penalty: 0.5 + + claude-3-5-sonnet: + fast: # Use with: --model claude-3-5-sonnet:fast + temperature: 0.3 + detailed: # Use with: --model claude-3-5-sonnet:detailed + temperature: 0.7 + thinking_tokens: 4096 +``` + +### Usage + +You can use these suffixes with any model argument: + +```bash +# Main model with high reasoning effort (using file) +aider --model gpt-5:high --model-overrides-file .aider.model.overrides.yml + +# Main model with high reasoning effort (using direct JSON/YAML) +aider --model gpt-5:high --model-overrides '{"gpt-5": {"high": {"temperature": 0.8, "top_p": 0.9, "extra_body": {"reasoning_effort": "high"}}}}' + +# Different configurations for main and weak models +aider --model claude-3-5-sonnet:detailed --weak-model claude-3-5-sonnet:fast + +# Editor model with creative settings +aider --model gpt-5 --editor-model gpt-5:creative +``` + +### How It Works + +1. 
When you specify a model with a suffix (e.g., `gpt-5:high`), Aider splits it into the base model name (`gpt-5`) and suffix (`high`). +2. It looks up the suffix in the overrides file for that model. +3. The corresponding configuration parameters are applied to the model's API calls. +4. The parameters are deep-merged into the model's existing settings, with overrides taking precedence. + +### Priority + +Model overrides work alongside aliases. For example, you can use: +- `aider --model fast:high` (if `fast` is an alias for `gpt-5-mini`) +- `aider --model sonnet:detailed` (if `sonnet` is an alias for `anthropic/claude-sonnet-4-20250514`) + +The suffix is applied after alias resolution. + ## Priority If the same alias is defined in multiple places, the priority is: @@ -103,3 +169,5 @@ If the same alias is defined in multiple places, the priority is: 3. Built-in aliases (lowest priority) This allows you to override built-in aliases with your own preferences. + +Model overrides with suffixes provide an additional layer of configuration that works alongside aliases, giving you fine-grained control over model parameters for different use cases. 
diff --git a/tests/basic/test_models.py b/tests/basic/test_models.py index a0a576130a0..bcf9d9db183 100644 --- a/tests/basic/test_models.py +++ b/tests/basic/test_models.py @@ -1,614 +1,762 @@ -import unittest -from unittest.mock import ANY, MagicMock, patch - -from aider.models import ( - ANTHROPIC_BETA_HEADER, - Model, - ModelInfoManager, - register_models, - sanity_check_model, - sanity_check_models, -) - - -class TestModels(unittest.TestCase): - def setUp(self): - """Reset MODEL_SETTINGS before each test""" - from aider.models import MODEL_SETTINGS - - self._original_settings = MODEL_SETTINGS.copy() - - def tearDown(self): - """Restore original MODEL_SETTINGS after each test""" - from aider.models import MODEL_SETTINGS - - MODEL_SETTINGS.clear() - MODEL_SETTINGS.extend(self._original_settings) - - def test_get_model_info_nonexistent(self): - manager = ModelInfoManager() - info = manager.get_model_info("non-existent-model") - self.assertEqual(info, {}) - - def test_max_context_tokens(self): - model = Model("gpt-3.5-turbo") - self.assertEqual(model.info["max_input_tokens"], 16385) - - model = Model("gpt-3.5-turbo-16k") - self.assertEqual(model.info["max_input_tokens"], 16385) - - model = Model("gpt-3.5-turbo-1106") - self.assertEqual(model.info["max_input_tokens"], 16385) - - model = Model("gpt-4") - self.assertEqual(model.info["max_input_tokens"], 8 * 1024) - - model = Model("gpt-4-32k") - self.assertEqual(model.info["max_input_tokens"], 32 * 1024) - - model = Model("gpt-4-0613") - self.assertEqual(model.info["max_input_tokens"], 8 * 1024) - - @patch("os.environ") - async def test_sanity_check_model_all_set(self, mock_environ): - mock_environ.get.return_value = "dummy_value" - mock_io = MagicMock() - model = MagicMock() - model.name = "test-model" - model.missing_keys = ["API_KEY1", "API_KEY2"] - model.keys_in_environment = True - model.info = {"some": "info"} - - await sanity_check_model(mock_io, model) - - mock_io.tool_output.assert_called() - calls = 
mock_io.tool_output.call_args_list - self.assertIn("- API_KEY1: Set", str(calls)) - self.assertIn("- API_KEY2: Set", str(calls)) - - @patch("os.environ") - async def test_sanity_check_model_not_set(self, mock_environ): - mock_environ.get.return_value = "" - mock_io = MagicMock() - model = MagicMock() - model.name = "test-model" - model.missing_keys = ["API_KEY1", "API_KEY2"] - model.keys_in_environment = True - model.info = {"some": "info"} - - await sanity_check_model(mock_io, model) - - mock_io.tool_output.assert_called() - calls = mock_io.tool_output.call_args_list - self.assertIn("- API_KEY1: Not set", str(calls)) - self.assertIn("- API_KEY2: Not set", str(calls)) - - async def test_sanity_check_models_bogus_editor(self): - mock_io = MagicMock() - main_model = Model("gpt-4") - main_model.editor_model = Model("bogus-model") - - result = await sanity_check_models(mock_io, main_model) - - self.assertTrue( - result - ) # Should return True because there's a problem with the editor model - mock_io.tool_warning.assert_called_with(ANY) # Ensure a warning was issued - - warning_messages = [ - warning_call.args[0] for warning_call in mock_io.tool_warning.call_args_list - ] - print("Warning messages:", warning_messages) # Add this line - - self.assertGreaterEqual(mock_io.tool_warning.call_count, 1) # Expect two warnings - self.assertTrue( - any("bogus-model" in msg for msg in warning_messages) - ) # Check that one of the warnings mentions the bogus model - - @patch("aider.models.check_for_dependencies") - async def test_sanity_check_model_calls_check_dependencies(self, mock_check_deps): - """Test that sanity_check_model calls check_for_dependencies""" - mock_io = MagicMock() - model = MagicMock() - model.name = "test-model" - model.missing_keys = [] - model.keys_in_environment = True - model.info = {"some": "info"} - - await sanity_check_model(mock_io, model) - - # Verify check_for_dependencies was called with the model name - 
mock_check_deps.assert_called_once_with(mock_io, "test-model") - - def test_model_aliases(self): - # Test common aliases - model = Model("4") - self.assertEqual(model.name, "gpt-4-0613") - - model = Model("4o") - self.assertEqual(model.name, "gpt-4o") - - model = Model("35turbo") - self.assertEqual(model.name, "gpt-3.5-turbo") - - model = Model("35-turbo") - self.assertEqual(model.name, "gpt-3.5-turbo") - - model = Model("3") - self.assertEqual(model.name, "gpt-3.5-turbo") - - model = Model("sonnet") - self.assertEqual(model.name, "anthropic/claude-sonnet-4-20250514") - - model = Model("haiku") - self.assertEqual(model.name, "claude-3-5-haiku-20241022") - - model = Model("opus") - self.assertEqual(model.name, "claude-opus-4-20250514") - - # Test non-alias passes through unchanged - model = Model("gpt-4") - self.assertEqual(model.name, "gpt-4") - - def test_o1_use_temp_false(self): - # Test GitHub Copilot models - model = Model("github/o1-mini") - self.assertEqual(model.name, "github/o1-mini") - self.assertEqual(model.use_temperature, False) - - model = Model("github/o1-preview") - self.assertEqual(model.name, "github/o1-preview") - self.assertEqual(model.use_temperature, False) - - def test_parse_token_value(self): - # Create a model instance to test the parse_token_value method - model = Model("gpt-4") - - # Test integer inputs - self.assertEqual(model.parse_token_value(8096), 8096) - self.assertEqual(model.parse_token_value(1000), 1000) - - # Test string inputs - self.assertEqual(model.parse_token_value("8096"), 8096) - - # Test k/K suffix (kilobytes) - self.assertEqual(model.parse_token_value("8k"), 8 * 1024) - self.assertEqual(model.parse_token_value("8K"), 8 * 1024) - self.assertEqual(model.parse_token_value("10.5k"), 10.5 * 1024) - self.assertEqual(model.parse_token_value("0.5K"), 0.5 * 1024) - - # Test m/M suffix (megabytes) - self.assertEqual(model.parse_token_value("1m"), 1 * 1024 * 1024) - self.assertEqual(model.parse_token_value("1M"), 1 * 1024 * 1024) - 
self.assertEqual(model.parse_token_value("0.5M"), 0.5 * 1024 * 1024) - - # Test with spaces - self.assertEqual(model.parse_token_value(" 8k "), 8 * 1024) - - # Test conversion from other types - self.assertEqual(model.parse_token_value(8.0), 8) - - def test_set_thinking_tokens(self): - # Test that set_thinking_tokens correctly sets the tokens with different formats - model = Model("gpt-4") - - # Test with integer - model.set_thinking_tokens(8096) - self.assertEqual(model.extra_params["thinking"]["budget_tokens"], 8096) - self.assertFalse(model.use_temperature) - - # Test with string - model.set_thinking_tokens("10k") - self.assertEqual(model.extra_params["thinking"]["budget_tokens"], 10 * 1024) - - # Test with decimal value - model.set_thinking_tokens("0.5M") - self.assertEqual(model.extra_params["thinking"]["budget_tokens"], 0.5 * 1024 * 1024) - - @patch("aider.models.check_pip_install_extra") - async def test_check_for_dependencies_bedrock(self, mock_check_pip): - """Test that check_for_dependencies calls check_pip_install_extra for Bedrock models""" - from aider.io import InputOutput - - io = InputOutput() - - # Test with a Bedrock model - from aider.models import check_for_dependencies - - await check_for_dependencies(io, "bedrock/anthropic.claude-3-sonnet-20240229-v1:0") - - # Verify check_pip_install_extra was called with correct arguments - mock_check_pip.assert_called_once_with( - io, "boto3", "AWS Bedrock models require the boto3 package.", ["boto3"] - ) - - @patch("aider.models.check_pip_install_extra") - async def test_check_for_dependencies_vertex_ai(self, mock_check_pip): - """Test that check_for_dependencies calls check_pip_install_extra for Vertex AI models""" - from aider.io import InputOutput - - io = InputOutput() - - # Test with a Vertex AI model - from aider.models import check_for_dependencies - - await check_for_dependencies(io, "vertex_ai/gemini-1.5-pro") - - # Verify check_pip_install_extra was called with correct arguments - 
mock_check_pip.assert_called_once_with( - io, - "google.cloud.aiplatform", - "Google Vertex AI models require the google-cloud-aiplatform package.", - ["google-cloud-aiplatform"], - ) - - @patch("aider.models.check_pip_install_extra") - async def test_check_for_dependencies_other_model(self, mock_check_pip): - """Test that check_for_dependencies doesn't call check_pip_install_extra for other models""" - from aider.io import InputOutput - - io = InputOutput() - - # Test with a non-Bedrock, non-Vertex AI model - from aider.models import check_for_dependencies - - await check_for_dependencies(io, "gpt-4") - - # Verify check_pip_install_extra was not called - mock_check_pip.assert_not_called() - - def test_get_repo_map_tokens(self): - # Test default case (no max_input_tokens in info) - model = Model("gpt-4") - model.info = {} - self.assertEqual(model.get_repo_map_tokens(), 1024) - - # Test minimum boundary (max_input_tokens < 8192) - model.info = {"max_input_tokens": 4096} - self.assertEqual(model.get_repo_map_tokens(), 1024) - - # Test middle range (max_input_tokens = 16384) - model.info = {"max_input_tokens": 16384} - self.assertEqual(model.get_repo_map_tokens(), 2048) - - # Test maximum boundary (max_input_tokens > 32768) - model.info = {"max_input_tokens": 65536} - self.assertEqual(model.get_repo_map_tokens(), 4096) - - # Test exact boundary values - model.info = {"max_input_tokens": 8192} - self.assertEqual(model.get_repo_map_tokens(), 1024) - - model.info = {"max_input_tokens": 32768} - self.assertEqual(model.get_repo_map_tokens(), 4096) - - def test_configure_model_settings(self): - # Test o3-mini case - model = Model("something/o3-mini") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertFalse(model.use_temperature) - - # Test o1-mini case - model = Model("something/o1-mini") - self.assertTrue(model.use_repo_map) - self.assertFalse(model.use_temperature) - self.assertFalse(model.use_system_prompt) - - # Test 
o1-preview case - model = Model("something/o1-preview") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertFalse(model.use_temperature) - self.assertFalse(model.use_system_prompt) - - # Test o1 case - model = Model("something/o1") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertFalse(model.use_temperature) - self.assertFalse(model.streaming) - - # Test deepseek v3 case - model = Model("deepseek-v3") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertEqual(model.reminder, "sys") - self.assertTrue(model.examples_as_sys_msg) - - # Test deepseek reasoner case - model = Model("deepseek-r1") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertTrue(model.examples_as_sys_msg) - self.assertFalse(model.use_temperature) - self.assertEqual(model.reasoning_tag, "think") - - # Test provider/deepseek-r1 case - model = Model("someprovider/deepseek-r1") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertTrue(model.examples_as_sys_msg) - self.assertFalse(model.use_temperature) - self.assertEqual(model.reasoning_tag, "think") - - # Test provider/deepseek-v3 case - model = Model("anotherprovider/deepseek-v3") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertEqual(model.reminder, "sys") - self.assertTrue(model.examples_as_sys_msg) - - # Test llama3 70b case - model = Model("llama3-70b") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertTrue(model.send_undo_reply) - self.assertTrue(model.examples_as_sys_msg) - - # Test gpt-4 case - model = Model("gpt-4") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertTrue(model.send_undo_reply) - - # Test gpt-3.5 case - model = Model("gpt-3.5") - self.assertEqual(model.reminder, 
"sys") - - # Test 3.5-sonnet case - model = Model("claude-3.5-sonnet") - self.assertEqual(model.edit_format, "diff") - self.assertTrue(model.use_repo_map) - self.assertTrue(model.examples_as_sys_msg) - self.assertEqual(model.reminder, "user") - - # Test o1- prefix case - model = Model("o1-something") - self.assertFalse(model.use_system_prompt) - self.assertFalse(model.use_temperature) - - # Test qwen case - model = Model("qwen-coder-2.5-32b") - self.assertEqual(model.edit_format, "diff") - self.assertEqual(model.editor_edit_format, "editor-diff") - self.assertTrue(model.use_repo_map) - - def test_aider_extra_model_settings(self): - import tempfile - - import yaml - - # Create temporary YAML file with test settings - test_settings = [ - { - "name": "aider/extra_params", - "extra_params": { - "extra_headers": {"Foo": "bar"}, - "some_param": "some value", - }, - }, - ] - - # Write to a regular file instead of NamedTemporaryFile - # for better cross-platform compatibility - tmp = tempfile.mktemp(suffix=".yml") - try: - with open(tmp, "w") as f: - yaml.dump(test_settings, f) - - # Register the test settings - register_models([tmp]) - - # Test that defaults are applied when no exact match - model = Model("claude-3-5-sonnet-20240620") - # Test that both the override and existing headers are present - model = Model("claude-3-5-sonnet-20240620") - self.assertEqual(model.extra_params["extra_headers"]["Foo"], "bar") - self.assertEqual( - model.extra_params["extra_headers"]["anthropic-beta"], - ANTHROPIC_BETA_HEADER, - ) - self.assertEqual(model.extra_params["some_param"], "some value") - self.assertEqual(model.extra_params["max_tokens"], 8192) - - # Test that exact match overrides defaults but not overrides - model = Model("gpt-4") - self.assertEqual(model.extra_params["extra_headers"]["Foo"], "bar") - self.assertEqual(model.extra_params["some_param"], "some value") - finally: - # Clean up the temporary file - import os - - try: - os.unlink(tmp) - except OSError: - pass - - 
@patch("aider.models.litellm.acompletion") - @patch.object(Model, "token_count") - async def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion): - mock_token_count.return_value = 1000 - - model = Model("ollama/llama3") - model.extra_params = {} - messages = [{"role": "user", "content": "Hello"}] - - await model.send_completion(messages, functions=None, stream=False) - - # Verify num_ctx was calculated and added to call - expected_ctx = int(1000 * 1.25) + 8192 # 9442 - mock_completion.assert_called_once_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0, - num_ctx=expected_ctx, - timeout=600, - cache_control_injection_points=ANY, - ) - - @patch("aider.models.litellm.acompletion") - async def test_modern_tool_call_propagation(self, mock_completion): - # Test modern tool calling (used for MCP Server Tool Calls) - model = Model("gpt-4") - messages = [{"role": "user", "content": "Hello"}] - - await model.send_completion( - messages, functions=None, stream=False, tools=[dict(type="function", function="test")] - ) - - mock_completion.assert_called_with( - model=model.name, - messages=ANY, - stream=False, - tools=[dict(type="function", function="test")], - temperature=0, - timeout=600, - cache_control_injection_points=ANY, - ) - - @patch("aider.models.litellm.acompletion") - async def test_legacy_tool_call_propagation(self, mock_completion): - # Test modern tool calling (used for legacy server tool calling) - model = Model("gpt-4") - messages = [{"role": "user", "content": "Hello"}] - - await model.send_completion(messages, functions=["test"], stream=False) - - mock_completion.assert_called_with( - model=model.name, - messages=ANY, - stream=False, - tools=[dict(type="function", function="test")], - temperature=0, - timeout=600, - cache_control_injection_points=ANY, - tool_choice=ANY, - ) - - @patch("aider.models.litellm.acompletion") - async def test_ollama_uses_existing_num_ctx(self, mock_completion): - model = 
Model("ollama/llama3") - model.extra_params = {"num_ctx": 4096} - - messages = [{"role": "user", "content": "Hello"}] - await model.send_completion(messages, functions=None, stream=False) - - # Should use provided num_ctx from extra_params - mock_completion.assert_called_once_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0, - num_ctx=4096, - timeout=600, - cache_control_injection_points=ANY, - ) - - @patch("aider.models.litellm.acompletion") - async def test_non_ollama_no_num_ctx(self, mock_completion): - model = Model("gpt-4") - model.extra_params = {} - messages = [{"role": "user", "content": "Hello"}] - - await model.send_completion(messages, functions=None, stream=False) - - # Regular models shouldn't get num_ctx - mock_completion.assert_called_once_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0, - timeout=600, - cache_control_injection_points=ANY, - ) - self.assertNotIn("num_ctx", mock_completion.call_args.kwargs) - - def test_use_temperature_settings(self): - # Test use_temperature=True (default) uses temperature=0 - model = Model("gpt-4") - self.assertTrue(model.use_temperature) - self.assertEqual(model.use_temperature, True) - - # Test use_temperature=False doesn't pass temperature - model = Model("github/o1-mini") - self.assertFalse(model.use_temperature) - - # Test use_temperature as float value - model = Model("gpt-4") - model.use_temperature = 0.7 - self.assertEqual(model.use_temperature, 0.7) - - @patch("aider.models.litellm.acompletion") - async def test_request_timeout_default(self, mock_completion): - # Test default timeout is used when not specified in extra_params - model = Model("gpt-4") - model.extra_params = {} - messages = [{"role": "user", "content": "Hello"}] - await model.send_completion(messages, functions=None, stream=False) - mock_completion.assert_called_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0, - timeout=600, # Default timeout - 
cache_control_injection_points=ANY, - ) - - @patch("aider.models.litellm.acompletion") - async def test_request_timeout_from_extra_params(self, mock_completion): - # Test timeout from extra_params overrides default - model = Model("gpt-4") - model.extra_params = {"timeout": 300} # 5 minutes - messages = [{"role": "user", "content": "Hello"}] - await model.send_completion(messages, functions=None, stream=False) - mock_completion.assert_called_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0, - timeout=300, # From extra_params - cache_control_injection_points=ANY, - ) - - @patch("aider.models.litellm.acompletion") - async def test_use_temperature_in_send_completion(self, mock_completion): - # Test use_temperature=True sends temperature=0 - model = Model("gpt-4") - model.extra_params = {} - messages = [{"role": "user", "content": "Hello"}] - await model.send_completion(messages, functions=None, stream=False) - mock_completion.assert_called_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0, - timeout=600, - cache_control_injection_points=ANY, - ) - - # Test use_temperature=False doesn't send temperature - model = Model("github/o1-mini") - messages = [{"role": "user", "content": "Hello"}] - await model.send_completion(messages, functions=None, stream=False) - self.assertNotIn("temperature", mock_completion.call_args.kwargs) - - # Test use_temperature as float sends that value - model = Model("gpt-4") - model.extra_params = {} - model.use_temperature = 0.7 - messages = [{"role": "user", "content": "Hello"}] - await model.send_completion(messages, functions=None, stream=False) - mock_completion.assert_called_with( - model=model.name, - messages=ANY, - stream=False, - temperature=0.7, - timeout=600, - cache_control_injection_points=ANY, - ) - - -if __name__ == "__main__": - unittest.main() +import unittest +from unittest.mock import ANY, MagicMock, patch + +from aider.models import ( + ANTHROPIC_BETA_HEADER, + Model, + 
ModelInfoManager, + register_models, + sanity_check_model, + sanity_check_models, +) + + +class TestModels(unittest.TestCase): + def setUp(self): + """Reset MODEL_SETTINGS before each test""" + from aider.models import MODEL_SETTINGS + + self._original_settings = MODEL_SETTINGS.copy() + + def tearDown(self): + """Restore original MODEL_SETTINGS after each test""" + from aider.models import MODEL_SETTINGS + + MODEL_SETTINGS.clear() + MODEL_SETTINGS.extend(self._original_settings) + + def test_get_model_info_nonexistent(self): + manager = ModelInfoManager() + info = manager.get_model_info("non-existent-model") + self.assertEqual(info, {}) + + def test_max_context_tokens(self): + model = Model("gpt-3.5-turbo") + self.assertEqual(model.info["max_input_tokens"], 16385) + + model = Model("gpt-3.5-turbo-16k") + self.assertEqual(model.info["max_input_tokens"], 16385) + + model = Model("gpt-3.5-turbo-1106") + self.assertEqual(model.info["max_input_tokens"], 16385) + + model = Model("gpt-4") + self.assertEqual(model.info["max_input_tokens"], 8 * 1024) + + model = Model("gpt-4-32k") + self.assertEqual(model.info["max_input_tokens"], 32 * 1024) + + model = Model("gpt-4-0613") + self.assertEqual(model.info["max_input_tokens"], 8 * 1024) + + @patch("os.environ") + async def test_sanity_check_model_all_set(self, mock_environ): + mock_environ.get.return_value = "dummy_value" + mock_io = MagicMock() + model = MagicMock() + model.name = "test-model" + model.missing_keys = ["API_KEY1", "API_KEY2"] + model.keys_in_environment = True + model.info = {"some": "info"} + + await sanity_check_model(mock_io, model) + + mock_io.tool_output.assert_called() + calls = mock_io.tool_output.call_args_list + self.assertIn("- API_KEY1: Set", str(calls)) + self.assertIn("- API_KEY2: Set", str(calls)) + + @patch("os.environ") + async def test_sanity_check_model_not_set(self, mock_environ): + mock_environ.get.return_value = "" + mock_io = MagicMock() + model = MagicMock() + model.name = "test-model" + 
model.missing_keys = ["API_KEY1", "API_KEY2"] + model.keys_in_environment = True + model.info = {"some": "info"} + + await sanity_check_model(mock_io, model) + + mock_io.tool_output.assert_called() + calls = mock_io.tool_output.call_args_list + self.assertIn("- API_KEY1: Not set", str(calls)) + self.assertIn("- API_KEY2: Not set", str(calls)) + + async def test_sanity_check_models_bogus_editor(self): + mock_io = MagicMock() + main_model = Model("gpt-4") + main_model.editor_model = Model("bogus-model") + + result = await sanity_check_models(mock_io, main_model) + + self.assertTrue( + result + ) # Should return True because there's a problem with the editor model + mock_io.tool_warning.assert_called_with(ANY) # Ensure a warning was issued + + warning_messages = [ + warning_call.args[0] for warning_call in mock_io.tool_warning.call_args_list + ] + print("Warning messages:", warning_messages) # Add this line + + self.assertGreaterEqual(mock_io.tool_warning.call_count, 1) # Expect two warnings + self.assertTrue( + any("bogus-model" in msg for msg in warning_messages) + ) # Check that one of the warnings mentions the bogus model + + @patch("aider.models.check_for_dependencies") + async def test_sanity_check_model_calls_check_dependencies(self, mock_check_deps): + """Test that sanity_check_model calls check_for_dependencies""" + mock_io = MagicMock() + model = MagicMock() + model.name = "test-model" + model.missing_keys = [] + model.keys_in_environment = True + model.info = {"some": "info"} + + await sanity_check_model(mock_io, model) + + # Verify check_for_dependencies was called with the model name + mock_check_deps.assert_called_once_with(mock_io, "test-model") + + def test_model_aliases(self): + # Test common aliases + model = Model("4") + self.assertEqual(model.name, "gpt-4-0613") + + model = Model("4o") + self.assertEqual(model.name, "gpt-4o") + + model = Model("35turbo") + self.assertEqual(model.name, "gpt-3.5-turbo") + + model = Model("35-turbo") + 
self.assertEqual(model.name, "gpt-3.5-turbo") + + model = Model("3") + self.assertEqual(model.name, "gpt-3.5-turbo") + + model = Model("sonnet") + self.assertEqual(model.name, "anthropic/claude-sonnet-4-20250514") + + model = Model("haiku") + self.assertEqual(model.name, "claude-3-5-haiku-20241022") + + model = Model("opus") + self.assertEqual(model.name, "claude-opus-4-20250514") + + # Test non-alias passes through unchanged + model = Model("gpt-4") + self.assertEqual(model.name, "gpt-4") + + def test_o1_use_temp_false(self): + # Test GitHub Copilot models + model = Model("github/o1-mini") + self.assertEqual(model.name, "github/o1-mini") + self.assertEqual(model.use_temperature, False) + + model = Model("github/o1-preview") + self.assertEqual(model.name, "github/o1-preview") + self.assertEqual(model.use_temperature, False) + + def test_parse_token_value(self): + # Create a model instance to test the parse_token_value method + model = Model("gpt-4") + + # Test integer inputs + self.assertEqual(model.parse_token_value(8096), 8096) + self.assertEqual(model.parse_token_value(1000), 1000) + + # Test string inputs + self.assertEqual(model.parse_token_value("8096"), 8096) + + # Test k/K suffix (kilobytes) + self.assertEqual(model.parse_token_value("8k"), 8 * 1024) + self.assertEqual(model.parse_token_value("8K"), 8 * 1024) + self.assertEqual(model.parse_token_value("10.5k"), 10.5 * 1024) + self.assertEqual(model.parse_token_value("0.5K"), 0.5 * 1024) + + # Test m/M suffix (megabytes) + self.assertEqual(model.parse_token_value("1m"), 1 * 1024 * 1024) + self.assertEqual(model.parse_token_value("1M"), 1 * 1024 * 1024) + self.assertEqual(model.parse_token_value("0.5M"), 0.5 * 1024 * 1024) + + # Test with spaces + self.assertEqual(model.parse_token_value(" 8k "), 8 * 1024) + + # Test conversion from other types + self.assertEqual(model.parse_token_value(8.0), 8) + + def test_set_thinking_tokens(self): + # Test that set_thinking_tokens correctly sets the tokens with different 
formats + model = Model("gpt-4") + + # Test with integer + model.set_thinking_tokens(8096) + self.assertEqual(model.extra_params["thinking"]["budget_tokens"], 8096) + self.assertFalse(model.use_temperature) + + # Test with string + model.set_thinking_tokens("10k") + self.assertEqual(model.extra_params["thinking"]["budget_tokens"], 10 * 1024) + + # Test with decimal value + model.set_thinking_tokens("0.5M") + self.assertEqual(model.extra_params["thinking"]["budget_tokens"], 0.5 * 1024 * 1024) + + @patch("aider.models.check_pip_install_extra") + async def test_check_for_dependencies_bedrock(self, mock_check_pip): + """Test that check_for_dependencies calls check_pip_install_extra for Bedrock models""" + from aider.io import InputOutput + + io = InputOutput() + + # Test with a Bedrock model + from aider.models import check_for_dependencies + + await check_for_dependencies(io, "bedrock/anthropic.claude-3-sonnet-20240229-v1:0") + + # Verify check_pip_install_extra was called with correct arguments + mock_check_pip.assert_called_once_with( + io, "boto3", "AWS Bedrock models require the boto3 package.", ["boto3"] + ) + + @patch("aider.models.check_pip_install_extra") + async def test_check_for_dependencies_vertex_ai(self, mock_check_pip): + """Test that check_for_dependencies calls check_pip_install_extra for Vertex AI models""" + from aider.io import InputOutput + + io = InputOutput() + + # Test with a Vertex AI model + from aider.models import check_for_dependencies + + await check_for_dependencies(io, "vertex_ai/gemini-1.5-pro") + + # Verify check_pip_install_extra was called with correct arguments + mock_check_pip.assert_called_once_with( + io, + "google.cloud.aiplatform", + "Google Vertex AI models require the google-cloud-aiplatform package.", + ["google-cloud-aiplatform"], + ) + + @patch("aider.models.check_pip_install_extra") + async def test_check_for_dependencies_other_model(self, mock_check_pip): + """Test that check_for_dependencies doesn't call 
check_pip_install_extra for other models""" + from aider.io import InputOutput + + io = InputOutput() + + # Test with a non-Bedrock, non-Vertex AI model + from aider.models import check_for_dependencies + + await check_for_dependencies(io, "gpt-4") + + # Verify check_pip_install_extra was not called + mock_check_pip.assert_not_called() + + def test_get_repo_map_tokens(self): + # Test default case (no max_input_tokens in info) + model = Model("gpt-4") + model.info = {} + self.assertEqual(model.get_repo_map_tokens(), 1024) + + # Test minimum boundary (max_input_tokens < 8192) + model.info = {"max_input_tokens": 4096} + self.assertEqual(model.get_repo_map_tokens(), 1024) + + # Test middle range (max_input_tokens = 16384) + model.info = {"max_input_tokens": 16384} + self.assertEqual(model.get_repo_map_tokens(), 2048) + + # Test maximum boundary (max_input_tokens > 32768) + model.info = {"max_input_tokens": 65536} + self.assertEqual(model.get_repo_map_tokens(), 4096) + + # Test exact boundary values + model.info = {"max_input_tokens": 8192} + self.assertEqual(model.get_repo_map_tokens(), 1024) + + model.info = {"max_input_tokens": 32768} + self.assertEqual(model.get_repo_map_tokens(), 4096) + + def test_configure_model_settings(self): + # Test o3-mini case + model = Model("something/o3-mini") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + + # Test o1-mini case + model = Model("something/o1-mini") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + self.assertFalse(model.use_system_prompt) + + # Test o1-preview case + model = Model("something/o1-preview") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertFalse(model.use_temperature) + self.assertFalse(model.use_system_prompt) + + # Test o1 case + model = Model("something/o1") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + 
self.assertFalse(model.use_temperature) + self.assertFalse(model.streaming) + + # Test deepseek v3 case + model = Model("deepseek-v3") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertEqual(model.reminder, "sys") + self.assertTrue(model.examples_as_sys_msg) + + # Test deepseek reasoner case + model = Model("deepseek-r1") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.examples_as_sys_msg) + self.assertFalse(model.use_temperature) + self.assertEqual(model.reasoning_tag, "think") + + # Test provider/deepseek-r1 case + model = Model("someprovider/deepseek-r1") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.examples_as_sys_msg) + self.assertFalse(model.use_temperature) + self.assertEqual(model.reasoning_tag, "think") + + # Test provider/deepseek-v3 case + model = Model("anotherprovider/deepseek-v3") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertEqual(model.reminder, "sys") + self.assertTrue(model.examples_as_sys_msg) + + # Test llama3 70b case + model = Model("llama3-70b") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.send_undo_reply) + self.assertTrue(model.examples_as_sys_msg) + + # Test gpt-4 case + model = Model("gpt-4") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.send_undo_reply) + + # Test gpt-3.5 case + model = Model("gpt-3.5") + self.assertEqual(model.reminder, "sys") + + # Test 3.5-sonnet case + model = Model("claude-3.5-sonnet") + self.assertEqual(model.edit_format, "diff") + self.assertTrue(model.use_repo_map) + self.assertTrue(model.examples_as_sys_msg) + self.assertEqual(model.reminder, "user") + + # Test o1- prefix case + model = Model("o1-something") + self.assertFalse(model.use_system_prompt) + 
self.assertFalse(model.use_temperature) + + # Test qwen case + model = Model("qwen-coder-2.5-32b") + self.assertEqual(model.edit_format, "diff") + self.assertEqual(model.editor_edit_format, "editor-diff") + self.assertTrue(model.use_repo_map) + + def test_aider_extra_model_settings(self): + import tempfile + + import yaml + + # Create temporary YAML file with test settings + test_settings = [ + { + "name": "aider/extra_params", + "extra_params": { + "extra_headers": {"Foo": "bar"}, + "some_param": "some value", + }, + }, + ] + + # Write to a regular file instead of NamedTemporaryFile + # for better cross-platform compatibility + tmp = tempfile.mktemp(suffix=".yml") + try: + with open(tmp, "w") as f: + yaml.dump(test_settings, f) + + # Register the test settings + register_models([tmp]) + + # Test that defaults are applied when no exact match + model = Model("claude-3-5-sonnet-20240620") + # Test that both the override and existing headers are present + model = Model("claude-3-5-sonnet-20240620") + self.assertEqual(model.extra_params["extra_headers"]["Foo"], "bar") + self.assertEqual( + model.extra_params["extra_headers"]["anthropic-beta"], + ANTHROPIC_BETA_HEADER, + ) + self.assertEqual(model.extra_params["some_param"], "some value") + self.assertEqual(model.extra_params["max_tokens"], 8192) + + # Test that exact match overrides defaults but not overrides + model = Model("gpt-4") + self.assertEqual(model.extra_params["extra_headers"]["Foo"], "bar") + self.assertEqual(model.extra_params["some_param"], "some value") + finally: + # Clean up the temporary file + import os + + try: + os.unlink(tmp) + except OSError: + pass + + @patch("aider.models.litellm.acompletion") + @patch.object(Model, "token_count") + async def test_ollama_num_ctx_set_when_missing(self, mock_token_count, mock_completion): + mock_token_count.return_value = 1000 + + model = Model("ollama/llama3") + model.extra_params = {} + messages = [{"role": "user", "content": "Hello"}] + + await 
model.send_completion(messages, functions=None, stream=False) + + # Verify num_ctx was calculated and added to call + expected_ctx = int(1000 * 1.25) + 8192 # 9442 + mock_completion.assert_called_once_with( + model=model.name, + messages=ANY, + stream=False, + temperature=0, + num_ctx=expected_ctx, + timeout=600, + cache_control_injection_points=ANY, + ) + + @patch("aider.models.litellm.acompletion") + async def test_modern_tool_call_propagation(self, mock_completion): + # Test modern tool calling (used for MCP Server Tool Calls) + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + + await model.send_completion( + messages, functions=None, stream=False, tools=[dict(type="function", function="test")] + ) + + mock_completion.assert_called_with( + model=model.name, + messages=ANY, + stream=False, + tools=[dict(type="function", function="test")], + temperature=0, + timeout=600, + cache_control_injection_points=ANY, + ) + + @patch("aider.models.litellm.acompletion") + async def test_legacy_tool_call_propagation(self, mock_completion): + # Test modern tool calling (used for legacy server tool calling) + model = Model("gpt-4") + messages = [{"role": "user", "content": "Hello"}] + + await model.send_completion(messages, functions=["test"], stream=False) + + mock_completion.assert_called_with( + model=model.name, + messages=ANY, + stream=False, + tools=[dict(type="function", function="test")], + temperature=0, + timeout=600, + cache_control_injection_points=ANY, + tool_choice=ANY, + ) + + @patch("aider.models.litellm.acompletion") + async def test_ollama_uses_existing_num_ctx(self, mock_completion): + model = Model("ollama/llama3") + model.extra_params = {"num_ctx": 4096} + + messages = [{"role": "user", "content": "Hello"}] + await model.send_completion(messages, functions=None, stream=False) + + # Should use provided num_ctx from extra_params + mock_completion.assert_called_once_with( + model=model.name, + messages=ANY, + stream=False, + 
temperature=0, + num_ctx=4096, + timeout=600, + cache_control_injection_points=ANY, + ) + + @patch("aider.models.litellm.acompletion") + async def test_non_ollama_no_num_ctx(self, mock_completion): + model = Model("gpt-4") + model.extra_params = {} + messages = [{"role": "user", "content": "Hello"}] + + await model.send_completion(messages, functions=None, stream=False) + + # Regular models shouldn't get num_ctx + mock_completion.assert_called_once_with( + model=model.name, + messages=ANY, + stream=False, + temperature=0, + timeout=600, + cache_control_injection_points=ANY, + ) + self.assertNotIn("num_ctx", mock_completion.call_args.kwargs) + + def test_use_temperature_settings(self): + # Test use_temperature=True (default) uses temperature=0 + model = Model("gpt-4") + self.assertTrue(model.use_temperature) + self.assertEqual(model.use_temperature, True) + + # Test use_temperature=False doesn't pass temperature + model = Model("github/o1-mini") + self.assertFalse(model.use_temperature) + + # Test use_temperature as float value + model = Model("gpt-4") + model.use_temperature = 0.7 + self.assertEqual(model.use_temperature, 0.7) + + @patch("aider.models.litellm.acompletion") + async def test_request_timeout_default(self, mock_completion): + # Test default timeout is used when not specified in extra_params + model = Model("gpt-4") + model.extra_params = {} + messages = [{"role": "user", "content": "Hello"}] + await model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=ANY, + stream=False, + temperature=0, + timeout=600, # Default timeout + cache_control_injection_points=ANY, + ) + + @patch("aider.models.litellm.acompletion") + async def test_request_timeout_from_extra_params(self, mock_completion): + # Test timeout from extra_params overrides default + model = Model("gpt-4") + model.extra_params = {"timeout": 300} # 5 minutes + messages = [{"role": "user", "content": "Hello"}] + await 
model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=ANY, + stream=False, + temperature=0, + timeout=300, # From extra_params + cache_control_injection_points=ANY, + ) + + @patch("aider.models.litellm.acompletion") + async def test_use_temperature_in_send_completion(self, mock_completion): + # Test use_temperature=True sends temperature=0 + model = Model("gpt-4") + model.extra_params = {} + messages = [{"role": "user", "content": "Hello"}] + await model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=ANY, + stream=False, + temperature=0, + timeout=600, + cache_control_injection_points=ANY, + ) + + # Test use_temperature=False doesn't send temperature + model = Model("github/o1-mini") + messages = [{"role": "user", "content": "Hello"}] + await model.send_completion(messages, functions=None, stream=False) + self.assertNotIn("temperature", mock_completion.call_args.kwargs) + + # Test use_temperature as float sends that value + model = Model("gpt-4") + model.extra_params = {} + model.use_temperature = 0.7 + messages = [{"role": "user", "content": "Hello"}] + await model.send_completion(messages, functions=None, stream=False) + mock_completion.assert_called_with( + model=model.name, + messages=ANY, + stream=False, + temperature=0.7, + timeout=600, + cache_control_injection_points=ANY, + ) + + def test_model_override_kwargs(self): + """Test that override kwargs are applied to model extra_params.""" + # Test with override kwargs + model = Model("gpt-4", override_kwargs={"temperature": 0.8, "top_p": 0.9}) + self.assertIn("temperature", model.extra_params) + self.assertEqual(model.extra_params["temperature"], 0.8) + self.assertIn("top_p", model.extra_params) + self.assertEqual(model.extra_params["top_p"], 0.9) + + # Test that override kwargs merge with existing extra_params + model = Model("gpt-4", 
override_kwargs={"extra_headers": {"X-Custom": "value"}}) + self.assertIn("extra_headers", model.extra_params) + self.assertIn("X-Custom", model.extra_params["extra_headers"]) + self.assertEqual(model.extra_params["extra_headers"]["X-Custom"], "value") + + # Test nested dict merging + model = Model("gpt-4", override_kwargs={"extra_body": {"reasoning_effort": "high"}}) + self.assertIn("extra_body", model.extra_params) + self.assertIn("reasoning_effort", model.extra_params["extra_body"]) + self.assertEqual(model.extra_params["extra_body"]["reasoning_effort"], "high") + + def test_model_override_kwargs_with_existing_extra_params(self): + """Test that override kwargs merge correctly with existing extra_params.""" + # Create a model with existing extra_params via model settings + import tempfile + + import yaml + + test_settings = [ + { + "name": "gpt-4", + "extra_params": {"temperature": 0.5, "extra_headers": {"Existing": "header"}}, + }, + ] + + tmp = tempfile.mktemp(suffix=".yml") + try: + with open(tmp, "w") as f: + yaml.dump(test_settings, f) + + register_models([tmp]) + + # Test that override kwargs take precedence + model = Model("gpt-4", override_kwargs={"temperature": 0.8, "top_p": 0.9}) + self.assertEqual(model.extra_params["temperature"], 0.8) # Override wins + self.assertEqual(model.extra_params["top_p"], 0.9) # New param added + self.assertIn("extra_headers", model.extra_params) + self.assertEqual( + model.extra_params["extra_headers"]["Existing"], "header" + ) # Existing preserved + + # Test nested dict merging + model = Model("gpt-4", override_kwargs={"extra_headers": {"New": "value"}}) + self.assertIn("Existing", model.extra_params["extra_headers"]) + self.assertIn("New", model.extra_params["extra_headers"]) + self.assertEqual(model.extra_params["extra_headers"]["Existing"], "header") + self.assertEqual(model.extra_params["extra_headers"]["New"], "value") + finally: + import os + + try: + os.unlink(tmp) + except OSError: + pass + + 
@patch("aider.models.litellm.acompletion") + async def test_send_completion_with_override_kwargs(self, mock_completion): + """Test that override kwargs are passed to acompletion.""" + # Create model with override kwargs + model = Model("gpt-4", override_kwargs={"temperature": 0.8, "top_p": 0.9}) + messages = [{"role": "user", "content": "Hello"}] + + await model.send_completion(messages, functions=None, stream=False) + + # Check that override kwargs are in the call + mock_completion.assert_called_once() + call_kwargs = mock_completion.call_args.kwargs + + self.assertIn("temperature", call_kwargs) + self.assertEqual(call_kwargs["temperature"], 0.8) + self.assertIn("top_p", call_kwargs) + self.assertEqual(call_kwargs["top_p"], 0.9) + + # Check that model name and other defaults are still there + self.assertEqual(call_kwargs["model"], "gpt-4") + self.assertFalse(call_kwargs["stream"]) + + def test_parse_model_with_suffix(self): + """Test the parse_model_with_suffix function from main.py.""" + + # This test simulates the parse_model_with_suffix function logic + def parse_model_with_suffix(model_name, overrides): + """Parse model name with optional :suffix and apply overrides.""" + if not model_name: + return model_name, {} + + # Split on last colon to get model name and suffix + if ":" in model_name: + base_model, suffix = model_name.rsplit(":", 1) + else: + base_model, suffix = model_name, None + + # Apply overrides if suffix exists + override_kwargs = {} + if suffix and base_model in overrides and suffix in overrides[base_model]: + override_kwargs = overrides[base_model][suffix].copy() + + return base_model, override_kwargs + + # Test cases + overrides = { + "gpt-4o": { + "high": {"reasoning_effort": "high", "temperature": 0.7}, + "low": {"reasoning_effort": "low", "temperature": 0.2}, + }, + "claude-3-5-sonnet": {"fast": {"temperature": 0.3}, "creative": {"temperature": 0.9}}, + } + + # Test with suffix + base_model, kwargs = parse_model_with_suffix("gpt-4o:high", 
overrides) + self.assertEqual(base_model, "gpt-4o") + self.assertEqual(kwargs, {"reasoning_effort": "high", "temperature": 0.7}) + + # Test with different suffix + base_model, kwargs = parse_model_with_suffix("gpt-4o:low", overrides) + self.assertEqual(base_model, "gpt-4o") + self.assertEqual(kwargs, {"reasoning_effort": "low", "temperature": 0.2}) + + # Test without suffix + base_model, kwargs = parse_model_with_suffix("gpt-4o", overrides) + self.assertEqual(base_model, "gpt-4o") + self.assertEqual(kwargs, {}) + + # Test with unknown suffix + base_model, kwargs = parse_model_with_suffix("gpt-4o:unknown", overrides) + self.assertEqual(base_model, "gpt-4o") + self.assertEqual(kwargs, {}) + + # Test with unknown model + base_model, kwargs = parse_model_with_suffix("unknown-model:high", overrides) + self.assertEqual(base_model, "unknown-model") + self.assertEqual(kwargs, {}) + + # Test empty model name + base_model, kwargs = parse_model_with_suffix("", overrides) + self.assertEqual(base_model, "") + self.assertEqual(kwargs, {}) + + +if __name__ == "__main__": + unittest.main() From 98afe8c26b906eefb5108d31dfa24e79789ee556 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Mon, 15 Dec 2025 00:53:40 -0500 Subject: [PATCH 26/28] Documentation Updates --- README.md | 3 ++- aider/website/docs/config/mcp.md | 41 ++++++++++++++++++++++++++------ 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index e79afb420ea..df06438c996 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,9 @@ LLMs are a part of our lives from here on out so join us in learning about and c * [Agent Mode](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/agent-mode.md) * [MCP Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/mcp.md) * [TUI Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/tui.md) -* [Session 
Management](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/sessions.md) * [Skills](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/skills.md) +* [Session Management](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/sessions.md) +* [Advanced Model Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/model-aliases.md# advanced-model-settings) * [Aider Original Documentation (still mostly applies)](https://aider.chat/) You can see a selection of the enhancements and updates by comparing the help output: diff --git a/aider/website/docs/config/mcp.md b/aider/website/docs/config/mcp.md index a6d698c294b..35f17f142c2 100644 --- a/aider/website/docs/config/mcp.md +++ b/aider/website/docs/config/mcp.md @@ -18,11 +18,13 @@ You have two ways of sharing your MCP server configuration with Aider. {: .note } -> Today, Aider supports connecting to MCP servers using stdio and http transports. +> Today, Aider-CE/Cecli supports connecting to MCP servers using stdio, http, and sse transports. ### Config Files -You can also configure MCP servers in your `.aider.conf.yml` file: +You can configure MCP servers in your `.aider.conf.yml` file using either JSON or YAML format: + +#### JSON Configuration ```yaml mcp-servers: | @@ -36,6 +38,19 @@ mcp-servers: | } ``` +#### YAML Configuration + +```yaml +mcp-servers: + mcpServers: + context7: + transport: http + url: https://mcp.context7.com/mcp + deepwiki: + transport: http + url: https://mcp.deepwiki.com/mcp +``` + Or specify a configuration file: ```yaml @@ -46,7 +61,7 @@ These options are configurable in any of Aider's config file formats. 
### Flags -You can specify MCP servers directly on the command line using the `--mcp-servers` option with a JSON string: +You can specify MCP servers directly on the command line using the `--mcp-servers` option with a JSON or YAML string: #### Using a JSON String @@ -54,9 +69,21 @@ You can specify MCP servers directly on the command line using the `--mcp-server aider --mcp-servers '{"mcpServers":{"git":{"transport":"http","url":"http://localhost:8000"}}}' ``` +#### Using a YAML String + +```bash +aider --mcp-servers 'mcpServers: + context7: + transport: http + url: https://mcp.context7.com/mcp + deepwiki: + transport: http + url: https://mcp.deepwiki.com/mcp' +``` + #### Using a configuration file -Alternatively, you can store your MCP server configurations in a JSON file and reference it with the `--mcp-servers-file` option: +Alternatively, you can store your MCP server configurations in a JSON or YAML file and reference it with the `--mcp-servers-file` option: ```bash aider --mcp-servers-file mcp.json @@ -72,10 +99,10 @@ aider --mcp-transport http ### Environment Variables -You can also configure MCP servers using environment variables in your `.env` file: +You can also configure MCP servers using environment variables in your `.env` file using JSON or YAML format: ``` -AIDER_MCP_SERVERS={"mcpServers":{"git":{"command":"uvx","args":["mcp-server-git"]}}} +AIDER_MCP_SERVERS={"mcpServers":{"git":{"transport": "stdio", "command":"uvx","args":["mcp-server-git"]}}} ``` Or specify a configuration file: @@ -90,6 +117,6 @@ If you encounter issues with MCP servers: 1. Use the `--verbose` flag to see detailed information about MCP server loading 2. Check that the specified executables are installed and available in your PATH -3. Verify that your JSON configuration is valid +3. Verify that your JSON or YAML configuration is valid For more information about specific MCP servers and their capabilities, refer to their respective documentation. 
From 22a1ec65d44fcfc15170fc68373b9008eff0b1b9 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Mon, 15 Dec 2025 00:54:39 -0500 Subject: [PATCH 27/28] TUI, by default since it looks good as heck --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index df06438c996..0bd2a06dbab 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,7 @@ preserve-todo-list: true show-model-warnings: true use-enhanced-map: true watch-files: false +tui: true agent-config: large_file_token_threshold: 12500 From ead5d086099e5a9912065c019589031c64322adc Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Mon, 15 Dec 2025 00:59:03 -0500 Subject: [PATCH 28/28] #259: Add cost analyzer script --- scripts/cost_analyzer.py | 144 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 scripts/cost_analyzer.py diff --git a/scripts/cost_analyzer.py b/scripts/cost_analyzer.py new file mode 100644 index 00000000000..fe50767c671 --- /dev/null +++ b/scripts/cost_analyzer.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +import argparse +import re +from dataclasses import dataclass +from pathlib import Path + + +@dataclass +class TokenUsage: + """Store token usage and cost information.""" + + sent: int + received: int + cost: float + + +def parse_token_line(line): + """ + Parse a token usage line from Aider history. + Example: "> Tokens: 12k sent, 133 received. Cost: $0.04 message, $0.10 session." 
+ """ + # fmt: off + pattern = r">\s*Tokens:\s*(\d+(?:\.\d+)?k?)\s+sent,\s*(\d+(?:\.\d+)?k?)\s+received\.\s*Cost:\s*\$(\d+\.\d+)\s+message" # noqa + # fmt: on + + match = re.search(pattern, line) + if not match: + return None + + # Parse sent tokens (handle 'k' suffix) + sent_str = match.group(1) + sent = int(float(sent_str.rstrip("k")) * 1000) if "k" in sent_str else int(sent_str) + + # Parse received tokens (handle 'k' suffix) + received_str = match.group(2) + received = ( + int(float(received_str.rstrip("k")) * 1000) if "k" in received_str else int(received_str) + ) + + # Parse cost + cost = float(match.group(3)) + + return TokenUsage(sent=sent, received=received, cost=cost) + + +def extract_costs(history_file): + """Extract cost and token information from Aider history file.""" + usages = [] + + with open(history_file, "r", encoding="utf-8") as f: + for line in f: + usage = parse_token_line(line) + if usage: + usages.append(usage) + + return usages + + +def main(): + parser = argparse.ArgumentParser( + description="Calculate total Aider session costs from history files" + ) + parser.add_argument( + "directory", + nargs="?", + default=".", + help="Directory to analyze (default: current directory)", + ) + parser.add_argument( + "-r", "--recursive", action="store_true", help="Search recursively in subdirectories" + ) + parser.add_argument( + "-v", "--verbose", action="store_true", help="Show detailed token usage per message" + ) + + args = parser.parse_args() + + # Convert to Path object + target_dir = Path(args.directory) + + if not target_dir.exists(): + print(f"Error: Directory '{target_dir}' does not exist") + return 1 + + if not target_dir.is_dir(): + print(f"Error: '{target_dir}' is not a directory") + return 1 + + # Find all history files + pattern = "**/.aider*.history.md" if args.recursive else ".aider*.history.md" + history_files = list(target_dir.glob(pattern)) + + if not history_files: + print(f"No Aider history files found in '{target_dir}'") + return 
0 + + total_cost = 0.0 + total_sent = 0 + total_received = 0 + + print(f"Analyzing {len(history_files)} history file(s) in '{target_dir}':\n") + + for hist_file in sorted(history_files): + usages = extract_costs(hist_file) + + if not usages: + continue + + file_cost = sum(u.cost for u in usages) + file_sent = sum(u.sent for u in usages) + file_received = sum(u.received for u in usages) + + total_cost += file_cost + total_sent += file_sent + total_received += file_received + + # Show relative path for better readability + rel_path = hist_file.relative_to(target_dir) if args.recursive else hist_file.name + + print(f"{rel_path}:") + print(f" Messages: {len(usages)}") + print(f" Tokens sent: {file_sent:,} | received: {file_received:,}") + print(f" Cost: ${file_cost:.4f}") + + if args.verbose: + print(" Details:") + for i, usage in enumerate(usages, 1): + print( + f" Message {i}: {usage.sent:,} sent, {usage.received:,} received →" + f" ${usage.cost:.4f}" + ) + + print() + + print(f"{'=' * 60}") + print(f"Total tokens sent: {total_sent:,}") + print(f"Total tokens received: {total_received:,}") + print(f"Total cost: ${total_cost:.4f}") + + return 0 + + +if __name__ == "__main__": + exit(main())