From faa7507fdf6cc5093721edf2f311de1e27c76a7a Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 30 Nov 2025 13:43:57 -0500 Subject: [PATCH 1/7] Bump Version --- aider/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aider/__init__.py b/aider/__init__.py index 85def92b45c..4d273da8fec 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.88.33.dev" +__version__ = "0.88.34.dev" safe_version = __version__ try: From b924514f1b07686da051394e0e8392f84b0a2776 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 30 Nov 2025 14:19:00 -0500 Subject: [PATCH 2/7] Explicitly do not include images in scraping output --- .gitignore | 2 +- README.md | 4 ++-- aider/scrape.py | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 6a8fe65642b..cbd58d37e8a 100644 --- a/.gitignore +++ b/.gitignore @@ -31,8 +31,8 @@ !/requirements.txt # Ignore specific files +.aider* aider/__version__.py aider/_version.py *.pyc -.aider* env/ diff --git a/README.md b/README.md index 05c8fbca1db..3c4e7e3df08 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ agent: true analytics: false auto-commits: true auto-save: true -auto-load: true +auto-load: false cache-prompts: true check-update: true debug: false @@ -49,7 +49,7 @@ env-file: .aider.env multiline: true preserve-todo-list: true show-model-warnings: true -watch-files: true +watch-files: false agent-config: | { "large_file_token_threshold": 12500, diff --git a/aider/scrape.py b/aider/scrape.py index 3bdef29214b..f5492bc86dd 100755 --- a/aider/scrape.py +++ b/aider/scrape.py @@ -127,6 +127,8 @@ async def scrape(self, url): ): self.try_pandoc() content = self.html_to_markdown(content) + elif mime_type and mime_type.startswith("image/"): + content = "(Response is an image, not HTML)" return content From 4e38a6834b189537a776bff15d67dec68848263f Mon Sep 17 00:00:00 2001 From: Dustin Washington 
Date: Sun, 30 Nov 2025 14:45:26 -0500 Subject: [PATCH 3/7] #205: Mark response-parsed commands as running so they don't conflict with outputs --- aider/coders/base_coder.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 372ffb90fe2..b9ae19fc17a 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -3719,14 +3719,21 @@ async def run_shell_commands(self): done = set() group = ConfirmGroup(set(self.shell_commands)) accumulated_output = "" - for command in self.shell_commands: - if command in done: - continue - done.add(command) - output = await self.handle_shell_commands(command, group) - if output: - accumulated_output += output + "\n\n" - return accumulated_output + + try: + self.commands.cmd_running = True + + for command in self.shell_commands: + if command in done: + continue + done.add(command) + output = await self.handle_shell_commands(command, group) + if output: + accumulated_output += output + "\n\n" + + return accumulated_output + finally: + self.commands.cmd_running = False async def handle_shell_commands(self, commands_str, group): commands = commands_str.strip().splitlines() From 867bf1462458bd5f8b9bc5a37b93b01e79451ada Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 30 Nov 2025 18:45:03 -0500 Subject: [PATCH 4/7] More graceful exit logic, small wait on close, debug tracer quieting --- aider/main.py | 79 +++++++++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 37 deletions(-) diff --git a/aider/main.py b/aider/main.py index 0a9ede262d7..e6b80279bc6 100644 --- a/aider/main.py +++ b/aider/main.py @@ -496,47 +496,51 @@ def expand_glob_patterns(patterns, root="."): def custom_tracer(frame, event, arg): - import os - - global log_file - if not log_file: - os.makedirs(".aider/logs/", exist_ok=True) - log_file = open(".aider/logs/debug.log", "w", buffering=1) - - # Get the absolute path of the 
def custom_tracer(frame, event, arg):
    """Trace function for sys.settrace: log CALL/RETURN events of in-project code.

    Appends one line per traced event to .aider/logs/debug.log (opened lazily,
    line-buffered, on first use). Frames outside PROJECT_ROOT, frames from
    repo.py, and functions listed in file_excludelist are not logged.
    """
    try:
        import os

        global log_file
        if not log_file:
            os.makedirs(".aider/logs/", exist_ok=True)
            log_file = open(".aider/logs/debug.log", "w", buffering=1)

        # Absolute path of the file where the code is executing
        filename = os.path.abspath(frame.f_code.co_filename)

        # --- THE FILTERING LOGIC ---
        # Only proceed if the file path is INSIDE the project root.
        # BUG FIX: these early returns must actually reach the caller. The
        # previous version did `return custom_tracer` inside a `finally`
        # block, and a `return` in `finally` overrides any return (or
        # exception) from the `try` body — so `return None` never escaped
        # and the filtering was silently defeated.
        if not filename.startswith(PROJECT_ROOT):
            return None  # None means: no local trace function for this scope
        if filename.endswith("repo.py"):
            return None

        # If it's your code, trace the call
        if event == "call":
            func_name = frame.f_code.co_name
            line_no = frame.f_lineno

            if func_name not in file_excludelist:
                log_file.write(
                    f"-> CALL: {func_name}() in {os.path.basename(filename)}:{line_no} -"
                    f" {time.time()}\n"
                )

        if event == "return":
            func_name = frame.f_code.co_name
            line_no = frame.f_lineno

            if func_name not in file_excludelist:
                log_file.write(
                    f"<- RETURN: {func_name}() in {os.path.basename(filename)}:{line_no} -"
                    f" {time.time()}\n"
                )
    except Exception:
        # Debug tracing must never take down the app; swallow logging errors.
        pass

    # Must return the trace function (or a local one) for subsequent events
    return custom_tracer
+ if not self.auto_accept_architect and not confirmation: return + if confirmation == "tweak": + content = self.io.edit_in_editor(content) + + await asyncio.sleep(0.1) + kwargs = dict() # Use the editor_model from the main_model if it exists, otherwise use the main_model itself @@ -25,6 +35,7 @@ async def reply_completed(self): kwargs["main_model"] = editor_model kwargs["edit_format"] = self.main_model.editor_edit_format + kwargs["args"] = self.args kwargs["suggest_shell_commands"] = False kwargs["map_tokens"] = 0 kwargs["total_cost"] = self.total_cost diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index b9ae19fc17a..ac17b2563c6 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -3525,6 +3525,16 @@ async def prepare_to_edit(self, edits): async def apply_updates(self): edited = set() try: + if getattr(self.args, "tweak_responses", False): + confirmation = await self.io.confirm_ask("Tweak Response?", allow_tweak=True) + + if confirmation or confirmation == "tweak": + self.partial_response_content = self.io.edit_in_editor( + self.partial_response_content + ) + + await asyncio.sleep(0.1) + edits = self.get_edits() edits = self.apply_edits_dry_run(edits) edits = await self.prepare_to_edit(edits) diff --git a/aider/io.py b/aider/io.py index e0cdc5e7aae..0e7bd5d6b78 100644 --- a/aider/io.py +++ b/aider/io.py @@ -1114,6 +1114,24 @@ def ai_output(self, content): hist = "\n" + content.strip() + "\n\n" self.append_chat_history(hist) + def edit_in_editor(self, content): + import subprocess + import tempfile + + with tempfile.NamedTemporaryFile( + suffix=".md", mode="w", delete=False, encoding=self.encoding + ) as tmpfile: + tmpfile.write(content) + tmpfile.flush() + editor = os.environ.get("EDITOR", "vi") + subprocess.call([editor, tmpfile.name]) + + with open(tmpfile.name, "r", encoding=self.encoding) as f: + edited = f.read() + + os.unlink(tmpfile.name) + return edited + async def offer_url( self, url, prompt="Open URL for more 
info?", allow_never=True, acknowledge=False ): @@ -1164,6 +1182,7 @@ async def _confirm_ask( group=None, group_response=None, allow_never=False, + allow_tweak=False, acknowledge=False, ): self.num_user_asks += 1 @@ -1182,6 +1201,9 @@ async def _confirm_ask( valid_responses = ["yes", "no", "skip", "all"] options = " (Y)es/(N)o" + if allow_tweak: + valid_responses.append("tweak") + options += "/(T)weak" if group or group_response: if not explicit_yes_required or group_response: options += "/(A)ll" @@ -1268,6 +1290,9 @@ async def _confirm_ask( self.append_chat_history(hist, linebreak=True, blockquote=True) return False + if res == "t": + return "tweak" + if explicit_yes_required and not group_response: is_yes = res == "y" else: From aefaab4416ad124b042745e7f1ff62f318e8447c Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Sun, 30 Nov 2025 23:54:30 -0500 Subject: [PATCH 6/7] Agent mode cache efficiency: - Split chat files into the editable set and the most recently edited files - Keep only the most recently edited file ahead of the done messages - Use the changing of the most recently edited file as the marker for splitting the current chat messages and updating the done messages --- aider/coders/agent_coder.py | 56 ++++++++-- aider/coders/base_coder.py | 198 +++++++++++++++++++++++++++--------- aider/coders/chat_chunks.py | 24 +++-- 3 files changed, 214 insertions(+), 64 deletions(-) diff --git a/aider/coders/agent_coder.py b/aider/coders/agent_coder.py index c70f28e1f00..c1f79fb9698 100644 --- a/aider/coders/agent_coder.py +++ b/aider/coders/agent_coder.py @@ -147,6 +147,10 @@ def __init__(self, *args, **kwargs): # Enable enhanced context blocks by default self.use_enhanced_context = True + # Caching efficiency attributes + self._last_edited_file = None + self._cur_message_divider = None + # Initialize empty token tracking dictionary and cache structures # but don't populate yet to avoid startup delay self.allowed_context_blocks = set() @@ -696,8 +700,9 @@ def 
format_chat_chunks(self): "examples", "readonly_files", "repo", - "done", "chat_files", + "done", + "edit_files", "cur", "reminder", ] @@ -719,16 +724,22 @@ def format_chat_chunks(self): cur_messages_list = list(self.cur_messages) cur_messages_pre = [] cur_messages_post = cur_messages_list + chunks.readonly_files = self.get_readonly_files_messages() - if len(cur_messages_list) > 32: - divider = len(cur_messages_list) % 32 - if divider: - divider = -1 * divider + # Handle the dictionary structure from get_chat_files_messages() + chat_files_result = self.get_chat_files_messages() + chunks.chat_files = chat_files_result.get("chat_files", []) + chunks.edit_files = chat_files_result.get("edit_files", []) + edit_file_names = chat_files_result.get("edit_file_names", set()) + + # Update edit file tracking for caching efficiency + divider = self._update_edit_file_tracking(edit_file_names) + if divider is not None: + # Split cur_messages using the divider + if divider > 0 and divider < len(cur_messages_list): cur_messages_pre = cur_messages_list[:divider] cur_messages_post = cur_messages_list[divider:] - chunks.readonly_files = self.get_readonly_files_messages() - chunks.chat_files = self.get_chat_files_messages() chunks.repo = self.get_repo_messages() chunks.done = list(self.done_messages) + cur_messages_pre @@ -846,6 +857,37 @@ def format_chat_chunks(self): return chunks + def _update_edit_file_tracking(self, edit_file_names): + """ + Update tracking for last edited file and message divider for caching efficiency. + + When the last edited file changes, we store the current message index minus 4 + as a divider to split cur_messages, moving older messages to done_messages + for better caching. 
+ """ + kept_messages = 8 + if not edit_file_names: + self._cur_message_divider = 0 + + # Get the most recently edited file from the edit_file_names set + # We assume the first file in the sorted set is the most recent + sorted_edit_files = sorted(edit_file_names) + current_edited_file = sorted_edit_files[0] if sorted_edit_files else None + + # Check if the last edited file has changed + if current_edited_file != self._last_edited_file: + # Store the new last edited file + self._last_edited_file = current_edited_file + + # Calculate divider: current index minus last n messages + cur_messages_list = list(self.cur_messages) + if len(cur_messages_list) > kept_messages: + self._cur_message_divider = len(cur_messages_list) - kept_messages + else: + self._cur_message_divider = 0 + + return self._cur_message_divider + def get_context_summary(self): """ Generate a summary of the current context, including file content tokens and additional context blocks, diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index ac17b2563c6..13e0e25fdf6 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -737,52 +737,109 @@ def get_files_content(self, fnames=None): if not fnames: fnames = self.abs_fnames - prompt = "" - for fname, content in self.get_abs_fnames_content(): - if not is_image_file(fname): - relative_fname = self.get_rel_fname(fname) - prompt += "\n" - prompt += relative_fname - prompt += f"\n{self.fence[0]}\n" - - # Apply context management if enabled for large files - if self.context_management_enabled: - # Calculate tokens for this file - file_tokens = self.main_model.token_count(content) - - if file_tokens > self.large_file_token_threshold: - # Truncate the file content - lines = content.splitlines() - - # Keep the first and last parts of the file with a marker in between - keep_lines = ( - self.large_file_token_threshold // 40 - ) # Rough estimate of tokens per line - first_chunk = lines[: keep_lines // 2] - last_chunk = 
lines[-(keep_lines // 2) :] + # If there are files, return a dictionary with chat_files and edit_files + if fnames: + # Get current time for comparison + current_time = time.time() + lookback = current_time - 30 + + # Get file modification times and sort by most recent first + file_times = [] + for fname in fnames: + try: + mtime = os.path.getmtime(fname) + file_times.append((fname, mtime)) + except OSError: + # Skip files that can't be accessed + continue - truncated_content = "\n".join(first_chunk) - truncated_content += ( - f"\n\n... [File truncated due to size ({file_tokens} tokens). Use" - " /context-management to toggle truncation off] ...\n\n" - ) - truncated_content += "\n".join(last_chunk) + # Sort by modification time (most recent first) + file_times.sort(key=lambda x: x[1], reverse=True) + + # Determine which files go to edit_files + edit_files = set() + if file_times: + # Always include the most recently edited file + most_recent_file, most_recent_time = file_times[0] + edit_files.add(most_recent_file) + + # Include any files edited within the last minute + for fname, mtime in file_times: + if mtime >= lookback: + edit_files.add(fname) + + # Build content for chat_files and edit_files + chat_files_prompt = "" + edit_files_prompt = "" + chat_file_names = set() + edit_file_names = set() + + for fname, content in self.get_abs_fnames_content(): + if not is_image_file(fname): + relative_fname = self.get_rel_fname(fname) + file_prompt = "\n" + file_prompt += relative_fname + file_prompt += f"\n{self.fence[0]}\n" + + # Apply context management if enabled for large files + if self.context_management_enabled: + # Calculate tokens for this file + file_tokens = self.main_model.token_count(content) + + if file_tokens > self.large_file_token_threshold: + # Truncate the file content + lines = content.splitlines() + + # Keep the first and last parts of the file with a marker in between + keep_lines = ( + self.large_file_token_threshold // 40 + ) # Rough estimate of 
tokens per line + first_chunk = lines[: keep_lines // 2] + last_chunk = lines[-(keep_lines // 2) :] + + truncated_content = "\n".join(first_chunk) + truncated_content += ( + f"\n\n... [File truncated due to size ({file_tokens} tokens). Use" + " /context-management to toggle truncation off] ...\n\n" + ) + truncated_content += "\n".join(last_chunk) - # Add message about truncation - self.io.tool_output( - f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). " - "Use /context-management to toggle truncation off if needed." - ) + # Add message about truncation + self.io.tool_output( + f"⚠️ '{relative_fname}' is very large ({file_tokens} tokens). " + "Use /context-management to toggle truncation off if needed." + ) - prompt += truncated_content + file_prompt += truncated_content + else: + file_prompt += content else: - prompt += content - else: - prompt += content + file_prompt += content - prompt += f"{self.fence[1]}\n" + file_prompt += f"{self.fence[1]}\n" - return prompt + # Add to appropriate prompt based on edit time + if fname in edit_files: + edit_files_prompt += file_prompt + edit_file_names.add(relative_fname) + else: + chat_files_prompt += file_prompt + chat_file_names.add(relative_fname) + + return { + "chat_files": chat_files_prompt, + "edit_files": edit_files_prompt, + "chat_file_names": chat_file_names, + "edit_file_names": edit_file_names, + } + else: + # Return empty dictionary when no files + return { + "chat_files": "", + "edit_files": "", + "chat_file_names": set(), + "edit_file_names": set(), + } def get_read_only_files_content(self): prompt = "" @@ -1030,22 +1087,56 @@ def get_readonly_files_messages(self): def get_chat_files_messages(self): chat_files_messages = [] + edit_files_messages = [] + chat_file_names = set() + edit_file_names = set() + if self.abs_fnames: - files_content = self.gpt_prompts.files_content_prefix - files_content += self.get_files_content() + files_content_result = self.get_files_content() + + # Get content and file 
names from dictionary + chat_files_content = files_content_result.get("chat_files", "") + edit_files_content = files_content_result.get("edit_files", "") + chat_file_names = files_content_result.get("chat_file_names", set()) + edit_file_names = files_content_result.get("edit_file_names", set()) + files_reply = self.gpt_prompts.files_content_assistant_reply + + if chat_files_content: + chat_files_messages += [ + dict( + role="user", + content=self.gpt_prompts.files_content_prefix + chat_files_content, + ), + dict(role="assistant", content=files_reply), + ] + + if edit_files_content: + edit_files_messages += [ + dict( + role="user", + content=self.gpt_prompts.files_content_prefix + edit_files_content, + ), + dict(role="assistant", content=files_reply), + ] elif self.gpt_prompts.files_no_full_files_with_repo_map: files_content = self.gpt_prompts.files_no_full_files_with_repo_map files_reply = self.gpt_prompts.files_no_full_files_with_repo_map_reply + + if files_content: + chat_files_messages += [ + dict(role="user", content=files_content), + dict(role="assistant", content=files_reply), + ] else: files_content = self.gpt_prompts.files_no_full_files files_reply = "Ok." 
- if files_content: - chat_files_messages += [ - dict(role="user", content=files_content), - dict(role="assistant", content=files_reply), - ] + if files_content: + chat_files_messages += [ + dict(role="user", content=files_content), + dict(role="assistant", content=files_reply), + ] images_message = self.get_images_message(self.abs_fnames) if images_message is not None: @@ -1054,7 +1145,12 @@ def get_chat_files_messages(self): dict(role="assistant", content="Ok."), ] - return chat_files_messages + return { + "chat_files": chat_files_messages, + "edit_files": edit_files_messages, + "chat_file_names": chat_file_names, + "edit_file_names": edit_file_names, + } def get_images_message(self, fnames): supports_images = self.main_model.info.get("supports_vision") @@ -1887,7 +1983,11 @@ def format_chat_chunks(self): chunks.repo = self.get_repo_messages() chunks.readonly_files = self.get_readonly_files_messages() - chunks.chat_files = self.get_chat_files_messages() + + # Handle the dictionary structure from get_chat_files_messages() + chat_files_result = self.get_chat_files_messages() + chunks.chat_files = chat_files_result.get("chat_files", []) + chunks.edit_files = chat_files_result.get("edit_files", []) if self.gpt_prompts.system_reminder: reminder_message = [ diff --git a/aider/coders/chat_chunks.py b/aider/coders/chat_chunks.py index 31c34035a01..db4d0c6698d 100644 --- a/aider/coders/chat_chunks.py +++ b/aider/coders/chat_chunks.py @@ -10,6 +10,7 @@ class ChatChunks: repo: List = field(default_factory=list) readonly_files: List = field(default_factory=list) chat_files: List = field(default_factory=list) + edit_files: List = field(default_factory=list) cur: List = field(default_factory=list) reminder: List = field(default_factory=list) chunk_ordering: List = field(default_factory=list) @@ -27,14 +28,15 @@ def all_messages(self): return messages else: return ( - self.system - + self.examples - + self.readonly_files - + self.chat_files - + self.repo - + self.done - + 
self.cur - + self.reminder + self.format_list(self.system) + + self.format_list(self.examples) + + self.format_list(self.readonly_files) + + self.format_list(self.chat_files) + + self.format_list(self.repo) + + self.format_list(self.done) + + self.format_list(self.edit_files) + + self.format_list(self.cur) + + self.format_list(self.reminder) ) def add_cache_control_headers(self): @@ -76,3 +78,9 @@ def cacheable_messages(self): ): return messages[: len(messages) - i] return messages + + def format_list(chunk): + if type(chunk) is not list: + return [] + + return chunk From 5f3e3353b9af99789f6d9715f4f8799f00b14f38 Mon Sep 17 00:00:00 2001 From: Dustin Washington Date: Mon, 1 Dec 2025 00:50:33 -0500 Subject: [PATCH 7/7] Model! Identify yourself --- aider/coders/base_coder.py | 2 +- pyproject.toml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 13e0e25fdf6..e3926bf97ba 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -245,7 +245,7 @@ async def clone(self, **kwargs): def get_announcements(self): lines = [] - lines.append(f"Aider v{__version__}") + lines.append(f"Aider-CE v{__version__}") # Model main_model = self.main_model diff --git a/pyproject.toml b/pyproject.toml index 10413f42fbe..5da54b5b4f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ Homepage = "https://github.com/dwash96/aider-ce" [project.scripts] aider-ce = "aider.main:main" +cecli = "aider.main:main" [tool.setuptools.dynamic] dependencies = { file = "requirements/requirements.in" }