diff --git a/README.md b/README.md index c68a4d3a6f1..69e100718cc 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,7 @@ LLMs are a part of our lives from here on out so join us in learning about and c * [Agent Mode](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/agent-mode.md) * [MCP Configuration](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/mcp.md) * [Session Management](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/sessions.md) +* [Skills](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/skills.md) * [Aider Original Documentation (still mostly applies)](https://aider.chat/) You can see a selection of the enhancements and updates by comparing the help output: @@ -134,7 +135,7 @@ The current priorities are to improve core capabilities and user experience of t * [ ] Add a RAG tool for the model to ask questions about the codebase * [ ] Make the system prompts more aggressive about removing unneeded files/content from the context * [ ] Add a plugin-like system for allowing agent mode to use user-defined tools in simple python files - * [ ] Add a dynamic tool discovery tool to allow the system to have only the tools it needs in context + * [x] Add a dynamic tool discovery tool to allow the system to have only the tools it needs in context ### All Contributors (Both Aider Main and Aider-CE) diff --git a/aider/__init__.py b/aider/__init__.py index 6221a955f03..624bad1f4c0 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.88.40.dev" +__version__ = "0.89.0.dev" safe_version = __version__ try: diff --git a/aider/analytics.py b/aider/analytics.py deleted file mode 100644 index f3eb071c336..00000000000 --- a/aider/analytics.py +++ /dev/null @@ -1,258 +0,0 @@ -import json -import platform -import sys -import time -import uuid -from pathlib import Path - -from mixpanel import MixpanelException -from posthog 
import Posthog - -from aider import __version__ -from aider.dump import dump # noqa: F401 -from aider.models import model_info_manager - -PERCENT = 10 - - -def compute_hex_threshold(percent): - """Convert percentage to 6-digit hex threshold. - - Args: - percent: Percentage threshold (0-100) - - Returns: - str: 6-digit hex threshold - """ - return format(int(0xFFFFFF * percent / 100), "06x") - - -def is_uuid_in_percentage(uuid_str, percent): - """Check if a UUID string falls within the first X percent of the UUID space. - - Args: - uuid_str: UUID string to test - percent: Percentage threshold (0-100) - - Returns: - bool: True if UUID falls within the first X percent - """ - if not (0 <= percent <= 100): - raise ValueError("Percentage must be between 0 and 100") - - if not uuid_str: - return False - - # Convert percentage to hex threshold (1% = "04...", 10% = "1a...", etc) - # Using first 6 hex digits - if percent == 0: - return False - - threshold = compute_hex_threshold(percent) - return uuid_str[:6] <= threshold - - -mixpanel_project_token = "6da9a43058a5d1b9f3353153921fb04d" -posthog_project_api_key = "phc_99T7muzafUMMZX15H8XePbMSreEUzahHbtWjy3l5Qbv" -posthog_host = "https://us.i.posthog.com" - - -class Analytics: - # providers - mp = None - ph = None - - # saved - user_id = None - permanently_disable = None - asked_opt_in = None - - # ephemeral - logfile = None - - def __init__( - self, - logfile=None, - permanently_disable=False, - posthog_host=None, - posthog_project_api_key=None, - ): - self.logfile = logfile - self.get_or_create_uuid() - self.custom_posthog_host = posthog_host - self.custom_posthog_project_api_key = posthog_project_api_key - - if self.permanently_disable or permanently_disable or not self.asked_opt_in: - self.disable(permanently_disable) - - def enable(self): - if not self.user_id: - self.disable(False) - return - - if self.permanently_disable: - self.disable(True) - return - - if not self.asked_opt_in: - self.disable(False) - return - - # 
self.mp = Mixpanel(mixpanel_project_token) - self.ph = Posthog( - project_api_key=self.custom_posthog_project_api_key or posthog_project_api_key, - host=self.custom_posthog_host or posthog_host, - on_error=self.posthog_error, - enable_exception_autocapture=True, - super_properties=self.get_system_info(), # Add system info to all events - ) - - def disable(self, permanently): - self.mp = None - self.ph = None - - if permanently: - self.asked_opt_in = True - self.permanently_disable = True - self.save_data() - - def need_to_ask(self, args_analytics): - if args_analytics is False: - return False - - could_ask = not self.asked_opt_in and not self.permanently_disable - if not could_ask: - return False - - if args_analytics is True: - return True - - assert args_analytics is None, args_analytics - - if not self.user_id: - return False - - return is_uuid_in_percentage(self.user_id, PERCENT) - - def get_data_file_path(self): - try: - data_file = Path.home() / ".aider" / "analytics.json" - data_file.parent.mkdir(parents=True, exist_ok=True) - return data_file - except OSError: - # If we can't create/access the directory, just disable analytics - self.disable(permanently=False) - return None - - def get_or_create_uuid(self): - self.load_data() - if self.user_id: - return - - self.user_id = str(uuid.uuid4()) - self.save_data() - - def load_data(self): - data_file = self.get_data_file_path() - if not data_file: - return - - if data_file.exists(): - try: - data = json.loads(data_file.read_text()) - self.permanently_disable = data.get("permanently_disable") - self.user_id = data.get("uuid") - self.asked_opt_in = data.get("asked_opt_in", False) - except (json.decoder.JSONDecodeError, OSError): - self.disable(permanently=False) - - def save_data(self): - data_file = self.get_data_file_path() - if not data_file: - return - - data = dict( - uuid=self.user_id, - permanently_disable=self.permanently_disable, - asked_opt_in=self.asked_opt_in, - ) - - try: - 
data_file.write_text(json.dumps(data, indent=4)) - except OSError: - # If we can't write the file, just disable analytics - self.disable(permanently=False) - - def get_system_info(self): - return { - "python_version": sys.version.split()[0], - "os_platform": platform.system(), - "os_release": platform.release(), - "machine": platform.machine(), - "aider_version": __version__, - } - - def _redact_model_name(self, model): - if not model: - return None - - info = model_info_manager.get_model_from_cached_json_db(model.name) - if info: - return model.name - elif "/" in model.name: - return model.name.split("/")[0] + "/REDACTED" - return None - - def posthog_error(self): - """disable posthog if we get an error""" - print("X" * 100) - # https://github.com/PostHog/posthog-python/blob/9e1bb8c58afaa229da24c4fb576c08bb88a75752/posthog/consumer.py#L86 - # https://github.com/Aider-AI/aider/issues/2532 - self.ph = None - - def event(self, event_name, main_model=None, **kwargs): - if not self.mp and not self.ph and not self.logfile: - return - - properties = {} - - if main_model: - properties["main_model"] = self._redact_model_name(main_model) - properties["weak_model"] = self._redact_model_name(main_model.weak_model) - properties["editor_model"] = self._redact_model_name(main_model.editor_model) - - properties.update(kwargs) - - # Handle numeric values - for key, value in properties.items(): - if isinstance(value, (int, float)): - properties[key] = value - else: - properties[key] = str(value) - - if self.mp: - try: - self.mp.track(self.user_id, event_name, dict(properties)) - except MixpanelException: - self.mp = None # Disable mixpanel on connection errors - - if self.ph: - self.ph.capture(event_name, distinct_id=self.user_id, properties=dict(properties)) - - if self.logfile: - log_entry = { - "event": event_name, - "properties": properties, - "user_id": self.user_id, - "time": int(time.time()), - } - try: - with open(self.logfile, "a") as f: - json.dump(log_entry, f) - 
f.write("\n") - except OSError: - pass # Ignore OS errors when writing to logfile - - -if __name__ == "__main__": - dump(compute_hex_threshold(PERCENT)) diff --git a/aider/args.py b/aider/args.py index da383283cef..743fa2316ec 100644 --- a/aider/args.py +++ b/aider/args.py @@ -687,28 +687,28 @@ def get_parser(default_config_files, git_root): "--analytics", action=argparse.BooleanOptionalAction, default=None, - help="Enable/disable analytics for current session (default: random)", + help=argparse.SUPPRESS, ) group.add_argument( "--analytics-log", metavar="ANALYTICS_LOG_FILE", - help="Specify a file to log analytics events", + help=argparse.SUPPRESS, ).complete = shtab.FILE group.add_argument( "--analytics-disable", action="store_true", - help="Permanently disable analytics", + help=argparse.SUPPRESS, default=False, ) group.add_argument( "--analytics-posthog-host", metavar="ANALYTICS_POSTHOG_HOST", - help="Send analytics to custom PostHog instance", + help=argparse.SUPPRESS, ) group.add_argument( "--analytics-posthog-project-api-key", metavar="ANALYTICS_POSTHOG_PROJECT_API_KEY", - help="Send analytics to custom PostHog project", + help=argparse.SUPPRESS, ) ######### @@ -775,7 +775,7 @@ def get_parser(default_config_files, git_root): "--gui", "--browser", action=argparse.BooleanOptionalAction, - help="Run aider in your browser (default: False)", + help=argparse.SUPPRESS, default=False, ) group.add_argument( diff --git a/aider/coders/agent_coder.py b/aider/coders/agent_coder.py index fe7880e8634..b5f1c949791 100644 --- a/aider/coders/agent_coder.py +++ b/aider/coders/agent_coder.py @@ -27,6 +27,9 @@ create_bigram_vector, normalize_vector, ) + +# Import skills helper for skills +from aider.helpers.skills import SkillsManager from aider.mcp.server import LocalServer from aider.repo import ANY_GIT_ERROR @@ -50,10 +53,12 @@ indent_lines, insert_block, list_changes, + load_skill, ls, make_editable, make_readonly, remove, + remove_skill, replace_all, replace_line, 
replace_lines, @@ -126,6 +131,8 @@ def __init__(self, *args, **kwargs): # Enable context management by default only in agent mode self.context_management_enabled = True # Enabled by default for agent mode + # Skills configuration + self.skills_manager = None # Will be initialized later # Initialize change tracker for granular editing self.change_tracker = ChangeTracker() @@ -193,10 +200,12 @@ def _build_tool_registry(self): indent_lines, insert_block, list_changes, + load_skill, ls, make_editable, make_readonly, remove, + remove_skill, replace_all, replace_line, replace_lines, @@ -219,6 +228,10 @@ def _build_tool_registry(self): "tools_excludelist", agent_config.get("tools_blacklist", []) ) + if "skills" not in self.allowed_context_blocks or not agent_config.get("skills_paths"): + tools_excludelist.append("loadskill") + tools_excludelist.append("removeskill") + # Always include essential tools regardless of includelist/excludelist essential_tools = {"makeeditable", "replacetext", "view", "finished"} for module in tool_modules: @@ -286,6 +299,7 @@ def _get_agent_config(self): "git_status", "symbol_outline", "todo_list", + "skills", } if "exclude_context_blocks" in config: @@ -301,8 +315,52 @@ def _get_agent_config(self): "skip_cli_confirmations", config.get("yolo", False) ) + if "skills" in self.allowed_context_blocks: + # Skills configuration + if "skills_paths" not in config: + config["skills_paths"] = [] + if "skills_includelist" not in config: + config["skills_includelist"] = [] + if "skills_excludelist" not in config: + config["skills_excludelist"] = [] + + self._initialize_skills_manager(config) + return config + def _initialize_skills_manager(self, config): + """ + Initialize the skills manager with the configured directory paths and filters. 
+ """ + if not config.get("skills_paths", []): + return + + try: + git_root = str(self.repo.root) if self.repo else None + self.skills_manager = SkillsManager( + directory_paths=config.get("skills_paths", []), + include_list=config.get("skills_includelist", []), + exclude_list=config.get("skills_excludelist", []), + git_root=git_root, + coder=self, # Pass reference to the coder instance + ) + + except Exception as e: + self.io.tool_warning(f"Failed to initialize skills manager: {str(e)}") + + def show_announcements(self): + super().show_announcements() + + # Find and log available skills + skills = self.skills_manager.find_skills() + if skills: + skills_list = [] + for skill in skills: + skills_list.append(skill.name) + + joined_skills = ", ".join(skills_list) + self.io.tool_output(f"Available Skills: {joined_skills}") + def get_local_tool_schemas(self): """Returns the JSON schemas for all local tools using the tool registry.""" schemas = [] @@ -503,6 +561,8 @@ def _calculate_context_block_tokens(self, force=False): "directory_structure", "git_status", "symbol_outline", + "skills", + "loaded_skills", ] for block_type in block_types: @@ -539,6 +599,10 @@ def _generate_context_block(self, block_name): content = self.get_context_summary() elif block_name == "todo_list": content = self.get_todo_list() + elif block_name == "skills": + content = self.get_skills_context() + elif block_name == "loaded_skills": + content = self.get_skills_content() # Cache the result if it's not None if content is not None: @@ -697,13 +761,16 @@ def format_chat_chunks(self): chunks = ChatChunks( chunk_ordering=[ "system", + "static", "examples", "readonly_files", "repo", "chat_files", + "pre_message", "done", "edit_files", "cur", + "post_message", "reminder", ] ) @@ -760,59 +827,75 @@ def format_chat_chunks(self): # This also populates the context block cache self._calculate_context_block_tokens() - # Get blocks from cache to avoid regenerating them - env_context = 
self.get_cached_context_block("environment_info") - dir_structure = self.get_cached_context_block("directory_structure") - git_status = self.get_cached_context_block("git_status") - symbol_outline = self.get_cached_context_block("symbol_outline") - todo_list = self.get_cached_context_block("todo_list") - - # Context summary needs special handling because it depends on other blocks - context_summary = self.get_context_summary() + # Initialize chunk sections + chunks.static = [] + chunks.pre_message = [] + chunks.post_message = [] # 1. Add relatively static blocks BEFORE done_messages # These blocks change less frequently and can be part of the cacheable prefix static_blocks = [] - if env_context and "environment_info" in self.allowed_context_blocks: - static_blocks.append(env_context) - if dir_structure and "directory_structure" in self.allowed_context_blocks: - static_blocks.append(dir_structure) - - if static_blocks: - static_message = "\n\n".join(static_blocks) - # Insert as a system message right before done_messages - chunks.system.append(dict(role="system", content=static_message)) # 2. 
Add dynamic blocks AFTER chat_files # These blocks change with the current files in context - pre_dynamic_blocks = [] - post_dynamic_blocks = [] - if context_summary and "context_summary" in self.allowed_context_blocks: - pre_dynamic_blocks.append(context_summary) - if symbol_outline and "symbol_outline" in self.allowed_context_blocks: - pre_dynamic_blocks.append(symbol_outline) - if git_status and "git_status" in self.allowed_context_blocks: - pre_dynamic_blocks.append(git_status) - - if todo_list and "todo_list" in self.allowed_context_blocks: - pre_dynamic_blocks.append(todo_list) + pre_message_blocks = [] + post_message_blocks = [] + + if "environment_info" in self.allowed_context_blocks: + block = self.get_cached_context_block("environment_info") + static_blocks.append(block) + + if "directory_structure" in self.allowed_context_blocks: + block = self.get_cached_context_block("directory_structure") + static_blocks.append(block) + + if "skills" in self.allowed_context_blocks: + block = self._generate_context_block("skills") + static_blocks.append(block) + + if "symbol_outline" in self.allowed_context_blocks: + block = self.get_cached_context_block("symbol_outline") + pre_message_blocks.append(block) + + if "git_status" in self.allowed_context_blocks: + block = self.get_cached_context_block("git_status") + pre_message_blocks.append(block) + + if "todo_list" in self.allowed_context_blocks: + block = self.get_cached_context_block("todo_list") + pre_message_blocks.append(block) + + if "skills" in self.allowed_context_blocks: + block = self._generate_context_block("loaded_skills") + pre_message_blocks.append(block) + + if "context_summary" in self.allowed_context_blocks: + # Context summary needs special handling because it depends on other blocks + block = self.get_context_summary() + pre_message_blocks.insert(0, block) + # Add tool usage context if there are repetitive tools if hasattr(self, "tool_usage_history") and self.tool_usage_history: repetitive_tools = 
self._get_repetitive_tools() if repetitive_tools: tool_context = self._generate_tool_context(repetitive_tools) if tool_context: - post_dynamic_blocks.append(tool_context) + post_message_blocks.append(tool_context) - if pre_dynamic_blocks: - dynamic_message = "\n\n".join(pre_dynamic_blocks) - # Append as a system message on reminders - chunks.done.insert(0, dict(role="system", content=dynamic_message)) + if static_blocks: + for block in static_blocks: + if block: + chunks.static.append(dict(role="system", content=block)) + + if pre_message_blocks: + for block in pre_message_blocks: + if block: + chunks.pre_message.append(dict(role="system", content=block)) - if post_dynamic_blocks: - dynamic_message = "\n\n".join(post_dynamic_blocks) - # Append as a system message on reminders - reminder_message.insert(0, dict(role="system", content=dynamic_message)) + if post_message_blocks: + for block in post_message_blocks: + if block: + chunks.post_message.append(dict(role="system", content=block)) # Use accurate token counting method that considers enhanced context blocks base_messages = chunks.all_messages() @@ -855,6 +938,9 @@ def format_chat_chunks(self): ) chunks.cur[-1] = dict(role=final["role"], content=new_content) + if self.verbose: + self._log_chunks(chunks) + return chunks def _update_edit_file_tracking(self, edit_file_names): @@ -2273,6 +2359,38 @@ def get_todo_list(self): self.io.tool_error(f"Error generating todo list context: {str(e)}") return None + def get_skills_context(self): + """ + Generate a context block for available skills. + + Returns: + Formatted context block string or None if no skills available + """ + if not self.use_enhanced_context or not self.skills_manager: + return None + + try: + return self.skills_manager.get_skills_context() + except Exception as e: + self.io.tool_error(f"Error generating skills context: {str(e)}") + return None + + def get_skills_content(self): + """ + Generate a context block with the actual content of loaded skills. 
+ + Returns: + Formatted context block string with skill contents or None if no skills available + """ + if not self.use_enhanced_context or not self.skills_manager: + return None + + try: + return self.skills_manager.get_skills_content() + except Exception as e: + self.io.tool_error(f"Error generating skills content context: {str(e)}") + return None + def get_git_status(self): """ Generate a git status context block for repository information. @@ -2416,3 +2534,46 @@ def cmd_context_blocks(self, args=""): self.tokens_calculated = False return True + + def _log_chunks(self, chunks): + try: + import hashlib + import json + + if not hasattr(self, "_message_hashes"): + self._message_hashes = { + "system": None, + "static": None, + "examples": None, + "readonly_files": None, + "repo": None, + "chat_files": None, + "pre_message": None, + "done": None, + "edit_files": None, + "cur": None, + "post_message": None, + "reminder": None, + } + + changes = [] + for key, value in self._message_hashes.items(): + json_obj = json.dumps( + getattr(chunks, key, ""), sort_keys=True, separators=(",", ":") + ) + new_hash = hashlib.sha256(json_obj.encode("utf-8")).hexdigest() + if self._message_hashes[key] != new_hash: + changes.append(key) + + self._message_hashes[key] = new_hash + + print("") + print("MESSAGE CHUNK HASHES") + print(self._message_hashes) + print("") + print(changes) + print("") + + except Exception as e: + print(e) + pass diff --git a/aider/coders/agent_prompts.py b/aider/coders/agent_prompts.py index d843780d782..68dd74f7c6f 100644 --- a/aider/coders/agent_prompts.py +++ b/aider/coders/agent_prompts.py @@ -27,7 +27,7 @@ class AgentPrompts(CoderPrompts): 1. **Plan**: Determine the necessary changes. Use the `UpdateTodoList` tool to manage your plan. Always begin by the todo list. 2. **Explore**: Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `Grep`) to find relevant files. These tools add files to context as read-only. 
Use `Grep` first for broad searches to avoid context clutter. Concisely describe your search strategy with the `Thinking` tool. 3. **Think**: Given the contents of your exploration, concisely reason through the edits with the `Thinking` tool that need to be made to accomplish the goal. For complex edits, briefly outline your plan for the user. -4. **Execute**: Use the appropriate editing tool. Remember to use `MakeEditable` on a file before modifying it. Break large edits (those greater than 100 lines) into multiple steps +4. **Execute**: Use the appropriate editing tool. Remember to use `MakeEditable` on a file before modifying it. Break large edits (those greater than ~100 lines) into multiple smaller steps. Proactively use skills if they are available 5. **Verify & Recover**: After every edit, check the resulting diff snippet. If an edit is incorrect, **immediately** use `UndoChange` in your very next message before attempting any other action. 6. **Finished**: Use the `Finished` tool when all tasks and changes needed to accomplish the goal are finished @@ -44,10 +44,11 @@ class AgentPrompts(CoderPrompts): - **Text/Block Manipulation**: `ReplaceText` (Preferred for the majority of edits), `InsertBlock`, `DeleteBlock`, `ReplaceAll` (use with `dry_run=True` for safety). - **Line-Based Edits**: `ReplaceLine(s)`, `DeleteLine(s)`, `IndentLines`. - **Refactoring & History**: `ExtractLines`, `ListChanges`, `UndoChange`. +- **Skill Management**: `LoadSkill`, `RemoveSkill` **MANDATORY Safety Protocol for Line-Based Tools:** Line numbers are fragile. You **MUST** use a two-turn process: 1. **Turn 1**: Use `ShowNumberedContext` to get the exact, current line numbers. -2. **Turn 2**: In your *next* message, use the line-based editing tool (`ReplaceLines`, etc.) with the verified numbers. +2. **Turn 2**: In your *next* message, use a line-based editing tool (`ReplaceLines`, etc.) with the verified numbers. 
@@ -80,6 +81,7 @@ class AgentPrompts(CoderPrompts): - Any tool call automatically continues to the next turn. Provide no tool calls in your final answer. - Use context blocks (directory structure, git status) to orient yourself. - Remove files you are done with viewing/editing from the context with the `Remove` tool. It is fine to re-add them later +- Remove skills if they are not helpful for your current task with `RemoveSkill` {lazy_prompt} {shell_cmd_reminder} diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 4c93997305e..14146f8afb7 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -35,7 +35,6 @@ from rich.console import Console from aider import __version__, models, prompts, urls, utils -from aider.analytics import Analytics from aider.commands import Commands, SwitchCoder from aider.exceptions import LiteLLMExceptions from aider.helpers import coroutines @@ -151,6 +150,8 @@ class Coder: 25000 # Files larger than this will be truncated when context management is enabled ) + ok_to_warm_cache = False + @classmethod async def create( self, @@ -240,100 +241,6 @@ async def clone(self, **kwargs): new_coder = await Coder.create(from_coder=self, **kwargs) return new_coder - def get_announcements(self): - lines = [] - lines.append(f"Aider-CE v{__version__}") - - # Model - main_model = self.main_model - weak_model = main_model.weak_model - - if weak_model is not main_model: - prefix = "Main model" - else: - prefix = "Model" - - output = f"{prefix}: {main_model.name} with {self.edit_format} edit format" - - # Check for thinking token budget - thinking_tokens = main_model.get_thinking_tokens() - if thinking_tokens: - output += f", {thinking_tokens} think tokens" - - # Check for reasoning effort - reasoning_effort = main_model.get_reasoning_effort() - if reasoning_effort: - output += f", reasoning {reasoning_effort}" - - if self.add_cache_headers or main_model.caches_by_default: - output += ", prompt cache" - if 
main_model.info.get("supports_assistant_prefill"): - output += ", infinite output" - - lines.append(output) - - if self.edit_format == "architect": - output = ( - f"Editor model: {main_model.editor_model.name} with" - f" {main_model.editor_edit_format} edit format" - ) - lines.append(output) - - if weak_model is not main_model: - output = f"Weak model: {weak_model.name}" - lines.append(output) - - # Repo - if self.repo: - rel_repo_dir = self.repo.get_rel_repo_dir() - num_files = len(self.repo.get_tracked_files()) - - lines.append(f"Git repo: {rel_repo_dir} with {num_files:,} files") - if num_files > 1000: - lines.append( - "Warning: For large repos, consider using --subtree-only and .aiderignore" - ) - lines.append(f"See: {urls.large_repos}") - else: - lines.append("Git repo: none") - - # Repo-map - if self.repo_map: - map_tokens = self.repo_map.max_map_tokens - if map_tokens > 0: - refresh = self.repo_map.refresh - lines.append(f"Repo-map: using {map_tokens} tokens, {refresh} refresh") - max_map_tokens = self.main_model.get_repo_map_tokens() * 2 - if map_tokens > max_map_tokens: - lines.append( - f"Warning: map-tokens > {max_map_tokens} is not recommended. Too much" - " irrelevant code can confuse LLMs." - ) - else: - lines.append("Repo-map: disabled because map_tokens == 0") - else: - lines.append("Repo-map: disabled") - - if self.mcp_tools: - mcp_servers = [] - for server_name, server_tools in self.mcp_tools: - mcp_servers.append(server_name) - lines.append(f"MCP servers configured: {', '.join(mcp_servers)}") - - for fname in self.abs_read_only_stubs_fnames: - rel_fname = self.get_rel_fname(fname) - lines.append(f"Added {rel_fname} to the chat (read-only stub).") - - if self.done_messages: - lines.append("Restored previous conversation history.") - - if self.io.multiline_mode: - lines.append("Multiline mode: Enabled. 
Enter inserts newline, Alt-Enter submits text") - - return lines - - ok_to_warm_cache = False - def __init__( self, main_model, @@ -365,7 +272,6 @@ def __init__( commands=None, summarizer=None, total_cost=0.0, - analytics=None, map_refresh="auto", cache_prompts=False, num_cache_warming_pings=0, @@ -391,10 +297,6 @@ def __init__( # initialize from args.map_cache_dir self.map_cache_dir = map_cache_dir - # Fill in a dummy Analytics if needed, but it is never .enable()'d - self.analytics = analytics if analytics is not None else Analytics() - - self.event = self.analytics.event self.chat_language = chat_language self.commit_language = commit_language self.commit_before_message = [] @@ -639,11 +541,97 @@ def __init__( self.io.tool_output("JSON Schema:") self.io.tool_output(json.dumps(self.functions, indent=4)) - def setup_lint_cmds(self, lint_cmds): - if not lint_cmds: - return - for lang, cmd in lint_cmds.items(): - self.linter.set_linter(lang, cmd) + def get_announcements(self): + lines = [] + lines.append(f"Aider-CE v{__version__}") + + # Model + main_model = self.main_model + weak_model = main_model.weak_model + + if weak_model is not main_model: + prefix = "Main model" + else: + prefix = "Model" + + output = f"{prefix}: {main_model.name} with {self.edit_format} edit format" + + # Check for thinking token budget + thinking_tokens = main_model.get_thinking_tokens() + if thinking_tokens: + output += f", {thinking_tokens} think tokens" + + # Check for reasoning effort + reasoning_effort = main_model.get_reasoning_effort() + if reasoning_effort: + output += f", reasoning {reasoning_effort}" + + if self.add_cache_headers or main_model.caches_by_default: + output += ", prompt cache" + if main_model.info.get("supports_assistant_prefill"): + output += ", infinite output" + + lines.append(output) + + if self.edit_format == "architect": + output = ( + f"Editor model: {main_model.editor_model.name} with" + f" {main_model.editor_edit_format} edit format" + ) + 
lines.append(output) + + if weak_model is not main_model: + output = f"Weak model: {weak_model.name}" + lines.append(output) + + # Repo + if self.repo: + rel_repo_dir = self.repo.get_rel_repo_dir() + num_files = len(self.repo.get_tracked_files()) + + lines.append(f"Git repo: {rel_repo_dir} with {num_files:,} files") + if num_files > 1000: + lines.append( + "Warning: For large repos, consider using --subtree-only and .aiderignore" + ) + lines.append(f"See: {urls.large_repos}") + else: + lines.append("Git repo: none") + + # Repo-map + if self.repo_map: + map_tokens = self.repo_map.max_map_tokens + if map_tokens > 0: + refresh = self.repo_map.refresh + lines.append(f"Repo-map: using {map_tokens} tokens, {refresh} refresh") + max_map_tokens = self.main_model.get_repo_map_tokens() * 2 + if map_tokens > max_map_tokens: + lines.append( + f"Warning: map-tokens > {max_map_tokens} is not recommended. Too much" + " irrelevant code can confuse LLMs." + ) + else: + lines.append("Repo-map: disabled because map_tokens == 0") + else: + lines.append("Repo-map: disabled") + + if self.mcp_tools: + mcp_servers = [] + for server_name, server_tools in self.mcp_tools: + mcp_servers.append(server_name) + lines.append(f"MCP servers configured: {', '.join(mcp_servers)}") + + for fname in self.abs_read_only_stubs_fnames: + rel_fname = self.get_rel_fname(fname) + lines.append(f"Added {rel_fname} to the chat (read-only stub).") + + if self.done_messages: + lines.append("Restored previous conversation history.") + + if self.io.multiline_mode: + lines.append("Multiline mode: Enabled. 
Enter inserts newline, Alt-Enter submits text") + + return lines def show_announcements(self): bold = True @@ -651,6 +639,12 @@ def show_announcements(self): self.io.tool_output(line, bold=bold) bold = False + def setup_lint_cmds(self, lint_cmds): + if not lint_cmds: + return + for lang, cmd in lint_cmds.items(): + self.linter.set_linter(lang, cmd) + def add_rel_fname(self, rel_fname): self.abs_fnames.add(self.abs_root_path(rel_fname)) self.check_added_files() @@ -2135,8 +2129,6 @@ async def check_tokens(self, messages): return True async def send_message(self, inp): - self.event("message_send_starting") - # Notify IO that LLM processing is starting self.io.llm_started() @@ -2234,7 +2226,6 @@ async def send_message(self, inp): lines = traceback.format_exception(type(err), err, err.__traceback__) self.io.tool_warning("".join(lines)) self.io.tool_error(str(err)) - self.event("message_send_exception", exception=str(err)) return finally: if self.mdstream: @@ -3392,19 +3383,6 @@ def show_usage_report(self): self.io.tool_output(self.usage_report) self.io.rule() - prompt_tokens = self.message_tokens_sent - completion_tokens = self.message_tokens_received - self.event( - "message_send", - main_model=self.main_model, - edit_format=self.edit_format, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens, - cost=self.message_cost, - total_cost=self.total_cost, - ) - self.message_cost = 0.0 self.message_tokens_sent = 0 self.message_tokens_received = 0 diff --git a/aider/coders/chat_chunks.py b/aider/coders/chat_chunks.py index 85407ff0d1c..80edfcaabad 100644 --- a/aider/coders/chat_chunks.py +++ b/aider/coders/chat_chunks.py @@ -5,13 +5,16 @@ @dataclass class ChatChunks: system: List = field(default_factory=list) + static: List = field(default_factory=list) examples: List = field(default_factory=list) + pre_message: List = field(default_factory=list) done: List = field(default_factory=list) repo: List = 
field(default_factory=list) readonly_files: List = field(default_factory=list) chat_files: List = field(default_factory=list) edit_files: List = field(default_factory=list) cur: List = field(default_factory=list) + post_message: List = field(default_factory=list) reminder: List = field(default_factory=list) chunk_ordering: List = field(default_factory=list) @@ -29,13 +32,16 @@ def all_messages(self): else: return ( self.format_list(self.system) + + self.format_list(self.static) + self.format_list(self.examples) + self.format_list(self.readonly_files) + self.format_list(self.chat_files) + self.format_list(self.repo) + + self.format_list(self.pre_message) + self.format_list(self.done) + self.format_list(self.edit_files) + self.format_list(self.cur) + + self.format_list(self.post_message) + self.format_list(self.reminder) ) diff --git a/aider/commands.py b/aider/commands.py index b209c97a43d..d10db9bb5e3 100644 --- a/aider/commands.py +++ b/aider/commands.py @@ -362,7 +362,6 @@ def matching_commands(self, inp): async def run(self, inp): if inp.startswith("!"): - self.coder.event("command_run") return await self.do_run("run", inp[1:]) res = self.matching_commands(inp) @@ -371,11 +370,9 @@ async def run(self, inp): matching_commands, first_word, rest_inp = res if len(matching_commands) == 1: command = matching_commands[0][1:] - self.coder.event(f"command_{command}") return await self.do_run(command, rest_inp) elif first_word in matching_commands: command = first_word[1:] - self.coder.event(f"command_{command}") return await self.do_run(command, rest_inp) elif len(matching_commands) > 1: self.io.tool_error(f"Ambiguous command: {', '.join(matching_commands)}") @@ -1293,7 +1290,6 @@ async def cmd_run(self, args, add_on_nonzero_exit=False): async def cmd_exit(self, args): "Exit the application" - self.coder.event("exit", reason="/exit") for server in self.coder.mcp_servers: try: @@ -1461,7 +1457,6 @@ async def cmd_help(self, args): self.basic_help() return - 
self.coder.event("interactive help") from aider.coders.base_coder import Coder if not self.help: @@ -2160,6 +2155,96 @@ def cmd_load_session(self, args): session_manager = sessions.SessionManager(self.coder, self.io) session_manager.load_session(args.strip()) + def completions_load_session(self): + """Return available session names for completion""" + session_manager = sessions.SessionManager(self.coder, self.io) + sessions_list = session_manager.list_sessions() + return [session_info["name"] for session_info in sessions_list] + + def cmd_load_skill(self, args): + """Load a skill by name (agent mode only)""" + if not args.strip(): + self.io.tool_output("Usage: /load-skill ") + return + + skill_name = args.strip() + + # Check if we're in agent mode + if not hasattr(self.coder, "edit_format") or self.coder.edit_format != "agent": + self.io.tool_output("Skill loading is only available in agent mode.") + return + + # Check if skills_manager is available + if not hasattr(self.coder, "skills_manager") or self.coder.skills_manager is None: + self.io.tool_output("Skills manager is not initialized. Skills may not be configured.") + # Check if skills directories are configured + if ( + hasattr(self.coder, "skills_directory_paths") + and not self.coder.skills_directory_paths + ): + self.io.tool_output( + "No skills directories configured. Use --skills-paths to configure skill" + " directories." 
+ ) + return + + # Use the instance method on skills_manager + result = self.coder.skills_manager.load_skill(skill_name) + self.io.tool_output(result) + + def cmd_remove_skill(self, args): + """Remove a skill by name (agent mode only)""" + if not args.strip(): + self.io.tool_output("Usage: /remove-skill ") + return + + skill_name = args.strip() + + # Check if we're in agent mode + if not hasattr(self.coder, "edit_format") or self.coder.edit_format != "agent": + self.io.tool_output("Skill removal is only available in agent mode.") + return + + # Check if skills_manager is available + if not hasattr(self.coder, "skills_manager") or self.coder.skills_manager is None: + self.io.tool_output("Skills manager is not initialized. Skills may not be configured.") + # Check if skills directories are configured + if ( + hasattr(self.coder, "skills_directory_paths") + and not self.coder.skills_directory_paths + ): + self.io.tool_output( + "No skills directories configured. Use --skills-paths to configure skill" + " directories." 
+ ) + return + + # Use the instance method on skills_manager + result = self.coder.skills_manager.remove_skill(skill_name) + self.io.tool_output(result) + + def completions_load_skill(self): + """Return available skill names for completion""" + if not hasattr(self.coder, "skills_manager") or self.coder.skills_manager is None: + return [] + + try: + skills = self.coder.skills_manager.find_skills() + return [skill.name for skill in skills] + except Exception: + return [] + + def completions_remove_skill(self): + """Return currently loaded skill names for completion""" + if not hasattr(self.coder, "skills_manager") or self.coder.skills_manager is None: + return [] + + try: + skills = self.coder.skills_manager.find_skills() + return [skill.name for skill in skills] + except Exception: + return [] + def cmd_command_prefix(self, args=""): """Change Command Prefix For All Running Commands""" if not args.strip(): diff --git a/aider/helpers/file_searcher.py b/aider/helpers/file_searcher.py new file mode 100644 index 00000000000..11a3f656286 --- /dev/null +++ b/aider/helpers/file_searcher.py @@ -0,0 +1,203 @@ +""" +File search utilities for aider. + +This module provides functions for searching and resolving file paths +relative to various directories (git root, home folder, .aider, .cecli, etc.). +""" + +from pathlib import Path +from typing import List, Optional + + +def generate_search_path_list( + default_file: str, git_root: Optional[str], command_line_file: Optional[str] +) -> List[str]: + """ + Generate a list of file paths to search for configuration files. + + The search order is: + 1. Home directory (~/default_file) + 2. Git root directory (git_root/default_file) if git_root is provided + 3. Current directory (default_file) + 4. 
"""
File search utilities for aider.

This module provides functions for searching and resolving file paths
relative to various directories (git root, home folder, .aider, .cecli, etc.).
"""

from pathlib import Path
from typing import List, Optional


def generate_search_path_list(
    default_file: str, git_root: Optional[str], command_line_file: Optional[str]
) -> List[str]:
    """
    Generate the ordered list of candidate paths for a configuration file.

    Candidates are assembled lowest-priority first:

    1. Home directory (~/default_file)
    2. Git root directory (git_root/default_file) if git_root is provided
    3. Current directory (default_file)
    4. Command line specified file (command_line_file) if provided

    Duplicate resolved paths are collapsed so that the LAST (highest-priority)
    occurrence keeps its position; callers see each path exactly once, in
    apply order from first to last.

    Args:
        default_file: The default filename to search for
        git_root: The git root directory (optional)
        command_line_file: A file specified on the command line (optional)

    Returns:
        List of resolved path strings in search order (first to last)
    """
    candidates = [Path.home() / default_file]  # homedir
    if git_root:
        candidates.append(Path(git_root) / default_file)  # git root
    candidates.append(Path(default_file))  # current directory
    if command_line_file:
        candidates.append(Path(command_line_file))

    resolved: List[str] = []
    for candidate in candidates:
        try:
            resolved.append(str(candidate.expanduser().resolve()))
        except OSError:
            # Paths that cannot be resolved on this filesystem are skipped.
            continue

    # De-duplicate keeping the LAST occurrence of each path in place.
    # (The original reverse/uniq/reverse dance plus a redundant
    # dict.fromkeys pass implemented exactly this; done here in one pass.)
    seen = set()
    ordered: List[str] = []
    for path_str in reversed(resolved):
        if path_str not in seen:
            seen.add(path_str)
            ordered.append(path_str)
    ordered.reverse()
    return ordered


def resolve_file_path(
    filename: str,
    relative_to: str = "auto",
    git_root: Optional[str] = None,
    search_dirs: Optional[List[str]] = None,
) -> Optional[Path]:
    """
    Resolve a file path relative to various directories.

    Args:
        filename: The filename to resolve
        relative_to: Where to resolve the file from. Options:
            - "auto": Try multiple locations (git_root, home, .aider, .cecli, cwd)
            - "git": Resolve relative to git root
            - "home": Resolve relative to home directory
            - "cwd": Resolve relative to current working directory
            - "aider": Resolve relative to .aider directory in git root or home
            - "cecli": Resolve relative to .cecli directory in git root or home
        git_root: The git root directory (optional, required for "git" mode)
        search_dirs: Additional directories to search (only used by "auto")

    Returns:
        Resolved Path object. Note: only "auto" (and the git-root probe of
        the "aider"/"cecli" modes) checks that the file exists; the "git",
        "home" and "cwd" modes — and the home fallback of "aider"/"cecli" —
        return the computed path whether or not it exists. "auto" returns
        None when no candidate exists.

    Raises:
        ValueError: if relative_to is "git" without git_root, or is unknown.
    """
    if relative_to == "auto":
        # Try multiple locations in order of preference
        locations = []

        # 1. Git root (if available)
        if git_root:
            locations.append(Path(git_root) / filename)

        # 2. Home directory
        locations.append(Path.home() / filename)

        # 3. .aider directories
        if git_root:
            locations.append(Path(git_root) / ".aider" / filename)
        locations.append(Path.home() / ".aider" / filename)

        # 4. .cecli directories
        if git_root:
            locations.append(Path(git_root) / ".cecli" / filename)
        locations.append(Path.home() / ".cecli" / filename)

        # 5. Current working directory
        locations.append(Path.cwd() / filename)

        # 6. Additional search directories
        if search_dirs:
            for dir_path in search_dirs:
                locations.append(Path(dir_path) / filename)

        # First existing candidate wins.
        for location in locations:
            try:
                resolved = location.expanduser().resolve()
                if resolved.exists():
                    return resolved
            except OSError:
                continue

        return None

    elif relative_to == "git":
        if not git_root:
            raise ValueError("git_root is required when relative_to='git'")
        return (Path(git_root) / filename).expanduser().resolve()

    elif relative_to == "home":
        return (Path.home() / filename).expanduser().resolve()

    elif relative_to == "cwd":
        return (Path.cwd() / filename).expanduser().resolve()

    elif relative_to == "aider":
        # Prefer an existing file under the git root's .aider directory.
        if git_root:
            aider_path = Path(git_root) / ".aider" / filename
            try:
                resolved = aider_path.expanduser().resolve()
                if resolved.exists():
                    return resolved
            except OSError:
                pass

        # Fall back to the home directory (returned even if missing).
        return (Path.home() / ".aider" / filename).expanduser().resolve()

    elif relative_to == "cecli":
        # Prefer an existing file under the git root's .cecli directory.
        if git_root:
            cecli_path = Path(git_root) / ".cecli" / filename
            try:
                resolved = cecli_path.expanduser().resolve()
                if resolved.exists():
                    return resolved
            except OSError:
                pass

        # Fall back to the home directory (returned even if missing).
        return (Path.home() / ".cecli" / filename).expanduser().resolve()

    else:
        raise ValueError(f"Invalid relative_to value: {relative_to}")


def find_config_file(
    config_name: str,
    git_root: Optional[str] = None,
    command_line_file: Optional[str] = None,
    config_dirs: Optional[List[str]] = None,
) -> Optional[Path]:
    """
    Find a configuration file using the standard search path.

    This is a higher-level function that uses generate_search_path_list
    to find configuration files.

    Args:
        config_name: The configuration filename (e.g., ".env", ".aider.conf.yml")
        git_root: The git root directory (optional)
        command_line_file: A file specified on the command line (optional)
        config_dirs: Additional directories to search (optional)

    Returns:
        Path to the first existing configuration file found, or None
    """
    search_paths = generate_search_path_list(config_name, git_root, command_line_file)

    # Extra config directories are searched last.
    if config_dirs:
        for dir_path in config_dirs:
            search_paths.append(str(Path(dir_path) / config_name))

    for file_path in search_paths:
        path_obj = Path(file_path)
        if path_obj.exists():
            return path_obj.resolve()

    return None
"""
Skills helper for aider.

This module provides functions for loading, parsing, and managing skills
according to the Skills specification.
"""

import re
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional

import yaml


@dataclass
class SkillMetadata:
    """Metadata describing a single skill, parsed from SKILL.md frontmatter."""

    name: str
    description: str
    path: Path
    license: Optional[str] = None
    allowed_tools: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class SkillContent:
    """Complete skill content including metadata and instructions."""

    metadata: SkillMetadata
    frontmatter: Dict[str, Any]
    instructions: str
    references: Dict[str, Path] = field(default_factory=dict)
    scripts: Dict[str, Path] = field(default_factory=dict)
    assets: Dict[str, Path] = field(default_factory=dict)


class SkillsManager:
    """Manager for loading and managing skills."""

    def __init__(
        self,
        directory_paths: List[str],
        include_list: Optional[List[str]] = None,
        exclude_list: Optional[List[str]] = None,
        git_root: Optional[str] = None,
        coder=None,
    ):
        """
        Initialize the skills manager.

        Args:
            directory_paths: List of directory paths to search for skills
            include_list: Optional list of skill names to include (whitelist)
            exclude_list: Optional list of skill names to exclude (blacklist)
            git_root: Optional git root directory for relative path resolution
            coder: Optional reference to the coder instance
        """
        self.directory_paths = [Path(raw).expanduser().resolve() for raw in directory_paths]
        self.include_list = set(include_list) if include_list else None
        self.exclude_list = set(exclude_list) if exclude_list else set()
        self.git_root = Path(git_root).expanduser().resolve() if git_root else None
        self.coder = coder  # back-reference to the owning coder instance

        # Caches for parsed skills, their metadata, and the discovery scan.
        self._skills_cache: Dict[str, SkillContent] = {}
        self._skill_metadata_cache: Dict[str, SkillMetadata] = {}
        self._skills_find_cache: Optional[List[SkillMetadata]] = None

        # Names of skills explicitly activated via load_skill().
        self._loaded_skills: set[str] = set()

    def find_skills(self, reload: bool = False) -> List[SkillMetadata]:
        """
        Discover every skill under the configured directories.

        A skill is any immediate subdirectory containing a SKILL.md file.
        Results are cached after the first scan.

        Args:
            reload: If True, force a rescan instead of using the cache

        Returns:
            List of skill metadata objects
        """
        if self._skills_find_cache is not None and not reload:
            return self._skills_find_cache

        discovered: List[SkillMetadata] = []
        for base_dir in self.directory_paths:
            if not base_dir.exists():
                continue
            for candidate in base_dir.iterdir():
                if not candidate.is_dir():
                    continue
                manifest = candidate / "SKILL.md"
                if not manifest.exists():
                    continue
                try:
                    meta = self._parse_skill_metadata(manifest)
                    # Apply the include whitelist / exclude blacklist.
                    if self.include_list and meta.name not in self.include_list:
                        continue
                    if meta.name in self.exclude_list:
                        continue
                    discovered.append(meta)
                    self._skill_metadata_cache[meta.name] = meta
                except Exception:
                    # Skills that fail to parse are silently skipped.
                    continue

        self._skills_find_cache = discovered
        return discovered
+ + Args: + skill_md_path: Path to the SKILL.md file + + Returns: + SkillMetadata object + """ + content = skill_md_path.read_text(encoding="utf-8") + + # Parse YAML frontmatter (between --- markers) + frontmatter_match = re.search( + r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL | re.MULTILINE + ) + if not frontmatter_match: + raise ValueError(f"No YAML frontmatter found in {skill_md_path}") + + frontmatter = yaml.safe_load(frontmatter_match.group(1)) + + # Extract required fields + name = frontmatter.get("name") + description = frontmatter.get("description") + + if not name or not description: + raise ValueError(f"Missing required fields (name or description) in {skill_md_path}") + + return SkillMetadata( + name=name, + description=description, + path=skill_md_path.parent, + license=frontmatter.get("license"), + allowed_tools=frontmatter.get("allowed-tools", []), + metadata=frontmatter.get("metadata", {}), + ) + + def get_skill_content(self, skill_name: str) -> Optional[SkillContent]: + """ + Get skill content by name (loads and caches if not already loaded). + + Args: + skill_name: Name of the skill to get + + Returns: + SkillContent object or None if not found + """ + # Check cache first + if skill_name in self._skills_cache: + return self._skills_cache[skill_name] + + # Find the skill metadata + if skill_name not in self._skill_metadata_cache: + # Try to find it + skills = self.find_skills() + skill_metadata = next((s for s in skills if s.name == skill_name), None) + if not skill_metadata: + return None + self._skill_metadata_cache[skill_name] = skill_metadata + else: + skill_metadata = self._skill_metadata_cache[skill_name] + + # Load the complete skill + skill_content = self._load_complete_skill(skill_metadata) + self._skills_cache[skill_name] = skill_content + + return skill_content + + def _load_complete_skill(self, metadata: SkillMetadata) -> SkillContent: + """ + Load a complete skill including all components. 
+ + Args: + metadata: SkillMetadata object + + Returns: + SkillContent object + """ + skill_dir = metadata.path + + # Load SKILL.md content + skill_md_path = skill_dir / "SKILL.md" + content = skill_md_path.read_text(encoding="utf-8") + + # Parse frontmatter and instructions + frontmatter_match = re.search( + r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL | re.MULTILINE + ) + if not frontmatter_match: + raise ValueError(f"No YAML frontmatter found in {skill_md_path}") + + frontmatter = yaml.safe_load(frontmatter_match.group(1)) + instructions = content[frontmatter_match.end() :].strip() + + # Load references + references = self._load_references(skill_dir) + + # Load scripts + scripts = self._load_scripts(skill_dir) + + # Load assets + assets = self._load_assets(skill_dir) + + return SkillContent( + metadata=metadata, + frontmatter=frontmatter, + instructions=instructions, + references=references, + scripts=scripts, + assets=assets, + ) + + def _load_references(self, skill_dir: Path) -> Dict[str, Path]: + """Load reference files from the references/ directory.""" + references = {} + references_dir = skill_dir / "references" + + if references_dir.exists(): + for ref_file in references_dir.glob("**/*.md"): + try: + # Use relative path as key, store the Path object + rel_path = ref_file.relative_to(references_dir) + references[str(rel_path)] = ref_file + except Exception: + continue + + return references + + def _load_scripts(self, skill_dir: Path) -> Dict[str, Path]: + """Load script files from the scripts/ directory.""" + scripts = {} + scripts_dir = skill_dir / "scripts" + + if scripts_dir.exists(): + for script_file in scripts_dir.glob("**/*"): + if script_file.is_file(): + try: + # Use relative path as key, store the Path object + rel_path = script_file.relative_to(scripts_dir) + scripts[str(rel_path)] = script_file + except Exception: + continue + + return scripts + + def _load_assets(self, skill_dir: Path) -> Dict[str, Path]: + """Load asset files from the 
assets/ directory.""" + assets = {} + assets_dir = skill_dir / "assets" + + if assets_dir.exists(): + for asset_file in assets_dir.glob("**/*"): + if asset_file.is_file(): + try: + # Use relative path as key, store the Path object + rel_path = asset_file.relative_to(assets_dir) + assets[str(rel_path)] = asset_file + except Exception: + continue + + return assets + + def get_skill_summary(self, skill_name: str) -> Optional[str]: + """ + Get a summary of a skill for display purposes. + + Args: + skill_name: Name of the skill + + Returns: + Summary string or None if skill not found + """ + skill = self.get_skill_content(skill_name) + if not skill: + return None + + summary = f"Skill: {skill.metadata.name}\n" + summary += f"Description: {skill.metadata.description}\n" + + if skill.metadata.license: + summary += f"License: {skill.metadata.license}\n" + + if skill.metadata.allowed_tools: + summary += f"Allowed tools: {', '.join(skill.metadata.allowed_tools)}\n" + + summary += f"Path: {skill.metadata.path}\n" + + # Count resources + ref_count = len(skill.references) + script_count = len(skill.scripts) + asset_count = len(skill.assets) + + summary += ( + f"Resources: {ref_count} references, {script_count} scripts, {asset_count} assets\n" + ) + + return summary + + def get_all_skill_summaries(self) -> Dict[str, str]: + """ + Get summaries for all available skills. + + Returns: + Dictionary mapping skill names to summary strings + """ + skills = self.find_skills() + summaries = {} + + for skill_metadata in skills: + summary = self.get_skill_summary(skill_metadata.name) + if summary: + summaries[skill_metadata.name] = summary + + return summaries + + def load_skill(self, skill_name: str) -> str: + """ + Add a skill to the loaded skills set for inclusion in context. + + Returns: + Success or error message + """ + if not skill_name: + return "Error: Skill name is required." 
+ + # Check if coder is available + if not self.coder: + return "Error: Skills manager not connected to a coder instance." + + # Check if we're in agent mode + if not hasattr(self.coder, "edit_format") or self.coder.edit_format != "agent": + return "Error: Skill loading is only available in agent mode." + + # Check if skill is already loaded + if skill_name in self._loaded_skills: + return f"Skill '{skill_name}' is already loaded." + + # Find the skill to verify it exists + skills = self.find_skills() + skill_found = any(skill.name == skill_name for skill in skills) + + if skill_found: + # Load the skill content + skill_content = self.get_skill_content(skill_name) + + if skill_content: + # Add to loaded skills set + self._loaded_skills.add(skill_name) + + result = f"Skill '{skill_name}' loaded successfully." + + # Show skill summary + summary = self.get_skill_summary(skill_name) + if summary: + result += f"\n\n{summary}" + return result + else: + return f"Error: Skill '{skill_name}' found but could not be loaded." + else: + return f"Error: Skill '{skill_name}' not found in configured directories." + + def remove_skill(self, skill_name: str) -> str: + """ + Remove a skill from the loaded skills set. + + Returns: + Success or error message + """ + if not skill_name: + return "Error: Skill name is required." + + # Check if coder is available + if not self.coder: + return "Error: Skills manager not connected to a coder instance." + + # Check if we're in agent mode + if not hasattr(self.coder, "edit_format") or self.coder.edit_format != "agent": + return "Error: Skill removal is only available in agent mode." + + # Check if skill is already removed + if skill_name not in self._loaded_skills: + return f"Skill '{skill_name}' is not loaded." + + # Remove from loaded skills set + self._loaded_skills.remove(skill_name) + + return f"Skill '{skill_name}' removed successfully." 
+ + @classmethod + def skill_summary_loader( + cls, + directory_paths: List[str], + include_list: Optional[List[str]] = None, + exclude_list: Optional[List[str]] = None, + git_root: Optional[str] = None, + ) -> str: + """ + High-level function to load and summarize all available skills. + + Args: + directory_paths: List of directory paths to search for skills + include_list: Optional list of skill names to include (whitelist) + exclude_list: Optional list of skill names to exclude (blacklist) + git_root: Optional git root directory for relative path resolution + + Returns: + Formatted summary of all available skills + """ + manager = cls(directory_paths, include_list, exclude_list, git_root) + summaries = manager.get_all_skill_summaries() + + if not summaries: + return "No skills found in the specified directories." + + result = f"Found {len(summaries)} skill(s):\n\n" + + for i, (skill_name, summary) in enumerate(summaries.items(), 1): + result += f"{i}. {summary}\n" + + return result + + @staticmethod + def resolve_skill_directories( + base_paths: List[str], git_root: Optional[str] = None + ) -> List[Path]: + """ + Resolve skill directory paths relative to various locations. + + Args: + base_paths: List of base directory paths + git_root: Optional git root directory + + Returns: + List of resolved Path objects + """ + resolved_paths = [] + + for base_path in base_paths: + # Try to resolve relative to git root first + if git_root and not Path(base_path).is_absolute(): + git_path = Path(git_root) / base_path + if git_path.exists(): + resolved_paths.append(git_path.resolve()) + continue + + # Try as absolute or relative to current directory + try: + path = Path(base_path).expanduser().resolve() + if path.exists(): + resolved_paths.append(path) + except Exception: + continue + + return resolved_paths + + def get_skills_content(self) -> Optional[str]: + """ + Generate a context block with skill metadata and file paths for references, scripts, and assets. 
+ + Returns: + Formatted context block string with skill metadata and file paths or None if no skills available + """ + try: + # Only return skills that have been explicitly loaded via load_skill() + if not self._loaded_skills: + return None + + result = '\n' + result += "## Loaded Skills Content\n\n" + result += f"Found {len(self._loaded_skills)} skill(s) in configured directories:\n\n" + + for i, skill_name in enumerate(sorted(self._loaded_skills)): + # Load the complete skill (should be cached) + skill_content = self.get_skill_content(skill_name) + if not skill_content: + continue + + result += f"### Skill {i}: {skill_content.metadata.name}\n\n" + result += f"**Description**: {skill_content.metadata.description}\n\n" + + if skill_content.metadata.license: + result += f"**License**: {skill_content.metadata.license}\n\n" + + if skill_content.metadata.allowed_tools: + result += ( + f"**Allowed Tools**: {', '.join(skill_content.metadata.allowed_tools)}\n\n" + ) + + # Add instructions + result += "#### Instructions\n\n" + result += f"{skill_content.instructions}\n\n" + + # Add references file paths + if skill_content.references: + result += "#### References\n\n" + result += f"Available reference files ({len(skill_content.references)}):\n\n" + for ref_name, ref_path in skill_content.references.items(): + result += f"- **{ref_name}**: `{ref_path}`\n" + result += "\n" + + # Add scripts file paths + if skill_content.scripts: + result += "#### Scripts\n\n" + result += f"Available script files ({len(skill_content.scripts)}):\n\n" + for script_name, script_path in skill_content.scripts.items(): + result += f"- **{script_name}**: `{script_path}`\n" + result += "\n" + + # Add assets file paths + if skill_content.assets: + result += f"#### Assets ({len(skill_content.assets)} file(s))\n\n" + result += "Available asset files:\n\n" + for asset_name, asset_path in skill_content.assets.items(): + result += f"- **{asset_name}**: `{asset_path}`\n" + result += "\n" + + result += 
"---\n\n" + + result += "" + return result + except Exception: + # We can't use io.tool_error here since we don't have access to io + # The caller should handle the exception + raise + + def get_skills_context(self) -> Optional[str]: + """ + Generate a context block for available skills. + + Returns: + Formatted context block string or None if no skills available + """ + try: + # Get skill summaries + summaries = self.get_all_skill_summaries() + if not summaries: + return None + + result = '\n' + result += "## Available Skills\n\n" + result += f"Found {len(summaries)} skill(s) in configured directories:\n\n" + + for i, (skill_name, summary) in enumerate(summaries.items(), 1): + result += f"### Skill {i}: {skill_name}\n\n" + result += f"{summary}\n" + + result += ( + "Use the `LoadSkill` tool with the skill name if " + "the skill is relevant to the current task." + ) + result += "" + return result + except Exception: + # We can't use io.tool_error here since we don't have access to io + # The caller should handle the exception + raise diff --git a/aider/main.py b/aider/main.py index 5fc2a9eaf2c..0ac1bbd54ff 100644 --- a/aider/main.py +++ b/aider/main.py @@ -22,7 +22,6 @@ from prompt_toolkit.enums import EditingMode from aider import __version__, models, urls, utils -from aider.analytics import Analytics from aider.args import get_parser from aider.coders import Coder from aider.coders.base_coder import UnknownEditFormat @@ -30,6 +29,7 @@ from aider.copypaste import ClipboardWatcher from aider.deprecated import handle_deprecated_model_args from aider.format_settings import format_settings, scrub_sensitive_info +from aider.helpers.file_searcher import generate_search_path_list from aider.history import ChatSummary from aider.io import InputOutput from aider.llm import litellm # noqa: F401; properly init litellm on launch @@ -212,76 +212,6 @@ async def check_gitignore(git_root, io, ask=True): io.tool_output(f" {pattern}") -async def check_streamlit_install(io): - 
return await utils.check_pip_install_extra( - io, - "streamlit", - "You need to install the aider browser feature", - ["aider-ce[browser]"], - ) - - -async def write_streamlit_credentials(): - from streamlit.file_util import get_streamlit_file_path - - # See https://github.com/Aider-AI/aider/issues/772 - - credential_path = Path(get_streamlit_file_path()) / "credentials.toml" - if not os.path.exists(credential_path): - empty_creds = '[general]\nemail = ""\n' - - os.makedirs(os.path.dirname(credential_path), exist_ok=True) - with open(credential_path, "w") as f: - f.write(empty_creds) - else: - print("Streamlit credentials already exist.") - - -def launch_gui(args): - from streamlit.web import cli - - from aider import gui - - print() - print("CONTROL-C to exit...") - - # Necessary so streamlit does not prompt the user for an email address. - write_streamlit_credentials() - - target = gui.__file__ - - st_args = ["run", target] - - st_args += [ - "--browser.gatherUsageStats=false", - "--runner.magicEnabled=false", - "--server.runOnSave=false", - ] - - # https://github.com/Aider-AI/aider/issues/2193 - is_dev = "-dev" in str(__version__) - - if is_dev: - print("Watching for file changes.") - else: - st_args += [ - "--global.developmentMode=false", - "--server.fileWatcherType=none", - "--client.toolbarMode=viewer", # minimal? 
- ] - - st_args += ["--"] + args - - cli.main(st_args) - - # from click.testing import CliRunner - # runner = CliRunner() - # from streamlit.web import bootstrap - # bootstrap.load_config_options(flag_options={}) - # cli.main_run(target, args) - # sys.argv = ['streamlit', 'run', '--'] + args - - def parse_lint_cmds(lint_cmds, io): err = False res = dict() @@ -309,36 +239,6 @@ def parse_lint_cmds(lint_cmds, io): return res -def generate_search_path_list(default_file, git_root, command_line_file): - files = [] - files.append(Path.home() / default_file) # homedir - if git_root: - files.append(Path(git_root) / default_file) # git root - files.append(default_file) - if command_line_file: - files.append(command_line_file) - - resolved_files = [] - for fn in files: - try: - resolved_files.append(Path(fn).expanduser().resolve()) - except OSError: - pass - - files = resolved_files - files.reverse() - uniq = [] - for fn in files: - if fn not in uniq: - uniq.append(fn) - uniq.reverse() - files = uniq - files = list(map(str, files)) - files = list(dict.fromkeys(files)) - - return files - - def register_models(git_root, model_settings_fname, io, verbose=False): model_settings_files = generate_search_path_list( ".aider.model.settings.yml", git_root, model_settings_fname @@ -624,10 +524,6 @@ async def main_async(argv=None, input=None, output=None, force_git_root=None, re if git is None: args.git = False - if args.analytics_disable: - analytics = Analytics(permanently_disable=True) - print("Analytics have been permanently disabled.") - if not args.verify_ssl: import httpx @@ -746,44 +642,6 @@ def get_io(pretty): ) os.environ["OPENAI_ORGANIZATION"] = args.openai_organization_id - analytics = Analytics( - logfile=args.analytics_log, - permanently_disable=args.analytics_disable, - posthog_host=args.analytics_posthog_host, - posthog_project_api_key=args.analytics_posthog_project_api_key, - ) - - # if args.analytics is not False: - # if analytics.need_to_ask(args.analytics): - # 
io.tool_output( - # "Aider respects your privacy and never collects your code, chat messages, keys or" - # " personal info." - # ) - # io.tool_output(f"For more info: {urls.analytics}") - # disable = not await io.confirm_ask( - # "Allow collection of anonymous analytics to help improve aider?" - # ) - # analytics.asked_opt_in = True - # if disable: - # analytics.disable(permanently=True) - # io.tool_output("Analytics have been permanently disabled.") - # analytics.save_data() - # io.tool_output() - # # This is a no-op if the user has opted out - # analytics.enable() - - analytics.disable(permanently=True) - analytics.event("launched") - - if args.gui and not return_coder: - if not await check_streamlit_install(io): - analytics.event("exit", reason="Streamlit not installed") - return await graceful_exit(None) - analytics.event("gui session") - launch_gui(argv) - analytics.event("exit", reason="GUI session ended") - return await graceful_exit(None) - if args.verbose: for fname in loaded_dotenvs: io.tool_output(f"Loaded {fname}") @@ -814,7 +672,7 @@ def get_io(pretty): io.tool_output( "Provide either a single directory of a git repo, or a list of one or more files." ) - analytics.event("exit", reason="Invalid directory input") + return await graceful_exit(None, 1) git_dname = None @@ -825,7 +683,7 @@ def get_io(pretty): fnames = [] else: io.tool_error(f"{all_files[0]} is a directory, but --no-git selected.") - analytics.event("exit", reason="Directory with --no-git") + return await graceful_exit(None, 1) # We can't know the git repo for sure until after parsing the args. 
@@ -834,22 +692,21 @@ def get_io(pretty): if args.git and not force_git_root and git is not None: right_repo_root = guessed_wrong_repo(io, git_root, fnames, git_dname) if right_repo_root: - analytics.event("exit", reason="Recursing with correct repo") return await main_async(argv, input, output, right_repo_root, return_coder=return_coder) if args.just_check_update: update_available = await check_version(io, just_check=True, verbose=args.verbose) - analytics.event("exit", reason="Just checking update") + return await graceful_exit(None, 0 if not update_available else 1) if args.install_main_branch: success = await install_from_main_branch(io) - analytics.event("exit", reason="Installed main branch") + return await graceful_exit(None, 0 if success else 1) if args.upgrade: success = await install_upgrade(io) - analytics.event("exit", reason="Upgrade completed") + return await graceful_exit(None, 0 if success else 1) if args.check_update: @@ -871,7 +728,7 @@ def get_io(pretty): if args.list_models: models.print_matching_models(io, args.list_models) - analytics.event("exit", reason="Listed models") + return await graceful_exit(None) # Process any command line aliases @@ -882,14 +739,14 @@ def get_io(pretty): if len(parts) != 2: io.tool_error(f"Invalid alias format: {alias_def}") io.tool_output("Format should be: alias:model-name") - analytics.event("exit", reason="Invalid alias format error") + return await graceful_exit(None, 1) alias, model = parts models.MODEL_ALIASES[alias.strip()] = model.strip() - selected_model_name = await select_default_model(args, io, analytics) + selected_model_name = await select_default_model(args, io) if not selected_model_name: - # Error message and analytics event are handled within select_default_model + # Error message is handled within select_default_model # It might have already offered OAuth if no model/keys were found. # If it failed here, we exit. 
return await graceful_exit(None, 1) @@ -902,7 +759,7 @@ def get_io(pretty): " found." ) # Attempt OAuth flow because the specific model needs it - if await offer_openrouter_oauth(io, analytics): + if await offer_openrouter_oauth(io): # OAuth succeeded, the key should now be in os.environ. # Check if the key is now present after the flow. if os.environ.get("OPENROUTER_API_KEY"): @@ -915,10 +772,6 @@ def get_io(pretty): io.tool_error( "OpenRouter authentication seemed successful, but the key is still missing." ) - analytics.event( - "exit", - reason="OpenRouter key missing after successful OAuth for specified model", - ) return await graceful_exit(None, 1) else: # OAuth failed or was declined by the user @@ -928,10 +781,6 @@ def get_io(pretty): await io.offer_url( urls.models_and_keys, "Open documentation URL for more info?", acknowledge=True ) - analytics.event( - "exit", - reason="OpenRouter key missing for specified model and OAuth failed/declined", - ) return await graceful_exit(None, 1) main_model = models.Model( @@ -999,7 +848,6 @@ def get_io(pretty): lint_cmds = parse_lint_cmds(args.lint_cmd, io) if lint_cmds is None: - analytics.event("exit", reason="Invalid lint command format") return await graceful_exit(None, 1) repo = None @@ -1025,15 +873,8 @@ def get_io(pretty): if not args.skip_sanity_check_repo: if not await sanity_check_repo(repo, io): - analytics.event("exit", reason="Repository sanity check failed") return await graceful_exit(None, 1) - if repo and not args.skip_sanity_check_repo: - num_files = len(repo.get_tracked_files()) - analytics.event("repo", num_files=num_files) - else: - analytics.event("no-repo") - commands = Commands( io, None, @@ -1073,9 +914,6 @@ def get_io(pretty): if max_input_tokens: args.context_compaction_max_tokens = int(max_input_tokens * 0.8) - # Track auto-commits configuration - analytics.event("auto_commits", enabled=bool(args.auto_commits)) - try: # Load MCP servers from config string or file mcp_servers = load_mcp_servers( 
@@ -1109,7 +947,6 @@ def get_io(pretty): test_cmd=args.test_cmd, commands=commands, summarizer=summarizer, - analytics=analytics, map_refresh=args.map_refresh, cache_prompts=args.cache_prompts, map_mul_no_files=args.map_multiplier_no_files, @@ -1135,7 +972,6 @@ def get_io(pretty): if args.show_model_warnings: problem = await models.sanity_check_models(io, main_model) if problem: - analytics.event("model warning", main_model=main_model) io.tool_output("You can skip this check with --no-show-model-warnings") try: @@ -1146,7 +982,6 @@ def get_io(pretty): ) io.tool_output() except KeyboardInterrupt: - analytics.event("exit", reason="Keyboard interrupt during model warnings") return await graceful_exit(coder, 1) if args.git: @@ -1159,16 +994,15 @@ def get_io(pretty): await io.offer_url( urls.edit_formats, "Open documentation about edit formats?", acknowledge=True ) - analytics.event("exit", reason="Unknown edit format") + return await graceful_exit(None, 1) except ValueError as err: io.tool_error(str(err)) - analytics.event("exit", reason="ValueError during coder creation") + return await graceful_exit(None, 1) if return_coder: - analytics.event("exit", reason="Returning coder object") return coder ignores = [] @@ -1182,13 +1016,11 @@ def get_io(pretty): coder, gitignores=ignores, verbose=args.verbose, - analytics=analytics, root=str(Path.cwd()) if args.subtree_only else None, ) coder.file_watcher = file_watcher if args.copy_paste: - analytics.event("copy-paste mode") ClipboardWatcher(coder.io, verbose=args.verbose) if args.show_prompts: @@ -1197,7 +1029,7 @@ def get_io(pretty): ] messages = coder.format_messages().all_messages() utils.show_messages(messages) - analytics.event("exit", reason="Showed prompts") + return await graceful_exit(coder) if args.lint: @@ -1206,7 +1038,7 @@ def get_io(pretty): if args.test: if not args.test_cmd: io.tool_error("No --test-cmd provided.") - analytics.event("exit", reason="No test command provided") + return await graceful_exit(coder, 
1) await coder.commands.cmd_test(args.test_cmd) if io.placeholder: @@ -1219,27 +1051,25 @@ def get_io(pretty): await coder.commands.cmd_commit() if args.lint or args.test or args.commit: - analytics.event("exit", reason="Completed lint/test/commit") return await graceful_exit(coder) if args.show_repo_map: repo_map = coder.get_repo_map() if repo_map: io.tool_output(repo_map) - analytics.event("exit", reason="Showed repo map") + return await graceful_exit(coder) if args.apply: content = io.read_text(args.apply) if content is None: - analytics.event("exit", reason="Failed to read apply content") return await graceful_exit(coder) coder.partial_response_content = content # For testing #2879 # from aider.coders.base_coder import all_fences # coder.fence = all_fences[1] await coder.apply_updates() - analytics.event("exit", reason="Applied updates") + return await graceful_exit(coder) if args.apply_clipboard_edits: @@ -1281,7 +1111,7 @@ def get_io(pretty): await coder.run(with_message=args.message) except (SwitchCoder, KeyboardInterrupt, SystemExit): pass - analytics.event("exit", reason="Completed --message") + return await graceful_exit(coder) if args.message_file: @@ -1293,22 +1123,18 @@ def get_io(pretty): pass except FileNotFoundError: io.tool_error(f"Message file not found: {args.message_file}") - analytics.event("exit", reason="Message file not found") + return await graceful_exit(coder, 1) except IOError as e: io.tool_error(f"Error reading message file: {e}") - analytics.event("exit", reason="Message file IO error") + return await graceful_exit(coder, 1) - analytics.event("exit", reason="Completed --message-file") return await graceful_exit(coder) if args.exit: - analytics.event("exit", reason="Exit flag set") return await graceful_exit(coder) - analytics.event("cli session", main_model=main_model, edit_format=main_model.edit_format) - # Auto-load session if enabled if args.auto_load: try: @@ -1326,7 +1152,7 @@ def get_io(pretty): try: coder.ok_to_warm_cache = 
bool(args.cache_keepalive_pings) await coder.run() - analytics.event("exit", reason="Completed main CLI coder.run") + return await graceful_exit(coder) except SwitchCoder as switch: coder.ok_to_warm_cache = False @@ -1349,7 +1175,6 @@ def get_io(pretty): if switch.kwargs.get("show_announcements") is False: coder.suppress_announcements_for_next_prompt = True except SystemExit: - analytics.event("exit", reason="/exit command") sys.settrace(None) return await graceful_exit(coder) diff --git a/aider/onboarding.py b/aider/onboarding.py index 0299d8cb6f4..e3660a9a909 100644 --- a/aider/onboarding.py +++ b/aider/onboarding.py @@ -76,13 +76,12 @@ def try_to_select_default_model(): return None -async def offer_openrouter_oauth(io, analytics): +async def offer_openrouter_oauth(io): """ Offers OpenRouter OAuth flow to the user if no API keys are found. Args: io: The InputOutput object for user interaction. - analytics: The Analytics object for tracking events. Returns: True if authentication was successful, False otherwise. 
@@ -95,26 +94,22 @@ async def offer_openrouter_oauth(io, analytics): default="y", acknowledge=True, ): - analytics.event("oauth_flow_initiated", provider="openrouter") - openrouter_key = start_openrouter_oauth_flow(io, analytics) + openrouter_key = start_openrouter_oauth_flow(io) if openrouter_key: # Successfully got key via OAuth, use the default OpenRouter model # Ensure OPENROUTER_API_KEY is now set in the environment for later use os.environ["OPENROUTER_API_KEY"] = openrouter_key - # Track OAuth success leading to model selection - analytics.event("oauth_flow_success") return True # OAuth failed or was cancelled by user implicitly (e.g., closing browser) # Error messages are handled within start_openrouter_oauth_flow - analytics.event("oauth_flow_failure") io.tool_error("OpenRouter authentication did not complete successfully.") # Fall through to the final error message return False -async def select_default_model(args, io, analytics): +async def select_default_model(args, io): """ Selects a default model based on available API keys if no model is specified. Offers OAuth flow for OpenRouter if no keys are found. @@ -122,7 +117,6 @@ async def select_default_model(args, io, analytics): Args: args: The command line arguments object. io: The InputOutput object for user interaction. - analytics: The Analytics object for tracking events. Returns: The name of the selected model, or None if no suitable default is found. @@ -133,14 +127,13 @@ async def select_default_model(args, io, analytics): model = try_to_select_default_model() if model: io.tool_warning(f"Using {model} model with API key from environment.") - analytics.event("auto_model_selection", model=model) return model no_model_msg = "No LLM model was specified and no API keys were provided." 
io.tool_warning(no_model_msg) # Try OAuth if no model was detected - await offer_openrouter_oauth(io, analytics) + await offer_openrouter_oauth(io) # Check again after potential OAuth success model = try_to_select_default_model() @@ -212,7 +205,7 @@ def exchange_code_for_key(code, code_verifier, io): # Function to start the OAuth flow -def start_openrouter_oauth_flow(io, analytics): +def start_openrouter_oauth_flow(io): """Initiates the OpenRouter OAuth PKCE flow using a local server.""" port = find_available_port() @@ -328,7 +321,6 @@ def run_server(): shutdown_server.wait(timeout=MINUTES * 60) # Convert minutes to seconds except KeyboardInterrupt: io.tool_warning("\nOAuth flow interrupted.") - analytics.event("oauth_flow_failed", provider="openrouter", reason="user_interrupt") interrupted = True # Ensure the server thread is signaled to shut down shutdown_server.set() @@ -341,16 +333,13 @@ def run_server(): if server_error: io.tool_error(f"Authentication failed: {server_error}") - analytics.event("oauth_flow_failed", provider="openrouter", reason=server_error) return None if not auth_code: io.tool_error("Authentication with OpenRouter failed.") - analytics.event("oauth_flow_failed", provider="openrouter") return None io.tool_output("Completing authentication...") - analytics.event("oauth_flow_code_received", provider="openrouter") # Exchange code for key api_key = exchange_code_for_key(auth_code, code_verifier, io) @@ -370,27 +359,17 @@ def run_server(): io.tool_warning("Aider will load the OpenRouter key automatically in future sessions.") io.tool_output() - analytics.event("oauth_flow_success", provider="openrouter") return api_key except Exception as e: io.tool_error(f"Successfully obtained key, but failed to save it to file: {e}") io.tool_warning("Set OPENROUTER_API_KEY environment variable for this session only.") # Still return the key for the current session even if saving failed - analytics.event("oauth_flow_save_failed", provider="openrouter", 
reason=str(e)) return api_key else: io.tool_error("Authentication with OpenRouter failed.") - analytics.event("oauth_flow_failed", provider="openrouter", reason="code_exchange_failed") return None -# Dummy Analytics class for testing -class DummyAnalytics: - def event(self, *args, **kwargs): - # print(f"Analytics Event: {args} {kwargs}") # Optional: print events - pass - - def main(): """Main function to test the OpenRouter OAuth flow.""" print("Starting OpenRouter OAuth flow test...") @@ -404,8 +383,6 @@ def main(): tool_output_color="BLUE", tool_error_color="RED", ) - # Use a dummy analytics object - analytics = DummyAnalytics() # Ensure OPENROUTER_API_KEY is not set, to trigger the flow naturally # (though start_openrouter_oauth_flow doesn't check this itself) @@ -413,7 +390,7 @@ def main(): print("Warning: OPENROUTER_API_KEY is already set in environment.") # del os.environ["OPENROUTER_API_KEY"] # Optionally unset it for testing - api_key = start_openrouter_oauth_flow(io, analytics) + api_key = start_openrouter_oauth_flow(io) if api_key: print("\nOAuth flow completed successfully!") diff --git a/aider/tools/load_skill.py b/aider/tools/load_skill.py new file mode 100644 index 00000000000..1b7a62f0de9 --- /dev/null +++ b/aider/tools/load_skill.py @@ -0,0 +1,51 @@ +from aider.tools.utils.base_tool import BaseTool + + +class Tool(BaseTool): + NORM_NAME = "loadskill" + SCHEMA = { + "type": "function", + "function": { + "name": "LoadSkill", + "description": ( + "Load a skill by name (agent mode only). Adds skill to include list and removes" + " from exclude list." + ), + "parameters": { + "type": "object", + "properties": { + "skill_name": { + "type": "string", + "description": "Name of the skill to load", + }, + }, + "required": ["skill_name"], + }, + }, + } + + @classmethod + def execute(cls, coder, skill_name): + """ + Load a skill by name (agent mode only). + """ + if not skill_name: + return "Error: Skill name is required." 
+ + # Check if we're in agent mode + if not hasattr(coder, "edit_format") or coder.edit_format != "agent": + return "Error: Skill loading is only available in agent mode." + + # Check if skills_manager is available + if not hasattr(coder, "skills_manager") or coder.skills_manager is None: + error_msg = "Error: Skills manager is not initialized. Skills may not be configured." + # Check if skills directories are configured + if hasattr(coder, "skills_directory_paths") and not coder.skills_directory_paths: + error_msg += ( + "\nNo skills directories configured. Use --skills-paths to configure skill" + " directories." + ) + return error_msg + + # Use the instance method on skills_manager + return coder.skills_manager.load_skill(skill_name) diff --git a/aider/tools/remove_skill.py b/aider/tools/remove_skill.py new file mode 100644 index 00000000000..b31f009a1ca --- /dev/null +++ b/aider/tools/remove_skill.py @@ -0,0 +1,51 @@ +from aider.tools.utils.base_tool import BaseTool + + +class Tool(BaseTool): + NORM_NAME = "removeskill" + SCHEMA = { + "type": "function", + "function": { + "name": "RemoveSkill", + "description": ( + "Remove a skill by name (agent mode only). Removes skill from include list and adds" + " to exclude list." + ), + "parameters": { + "type": "object", + "properties": { + "skill_name": { + "type": "string", + "description": "Name of the skill to remove", + }, + }, + "required": ["skill_name"], + }, + }, + } + + @classmethod + def execute(cls, coder, skill_name): + """ + Remove a skill by name (agent mode only). + """ + if not skill_name: + return "Error: Skill name is required." + + # Check if we're in agent mode + if not hasattr(coder, "edit_format") or coder.edit_format != "agent": + return "Error: Skill removal is only available in agent mode." + + # Check if skills_manager is available + if not hasattr(coder, "skills_manager") or coder.skills_manager is None: + error_msg = "Error: Skills manager is not initialized. Skills may not be configured." 
+ # Check if skills directories are configured + if hasattr(coder, "skills_directory_paths") and not coder.skills_directory_paths: + error_msg += ( + "\nNo skills directories configured. Use --skills-paths to configure skill" + " directories." + ) + return error_msg + + # Use the instance method on skills_manager + return coder.skills_manager.remove_skill(skill_name) diff --git a/aider/urls.py b/aider/urls.py index fad4043bf9e..081266aeb5c 100644 --- a/aider/urls.py +++ b/aider/urls.py @@ -11,7 +11,6 @@ github_issues = "https://github.com/dwash96/aider-ce/issues/new" git_index_version = "https://github.com/Aider-AI/aider/issues/211" install_properly = "https://aider.chat/docs/troubleshooting/imports.html" -analytics = "https://aider.chat/docs/more/analytics.html" release_notes = "https://github.com/dwash96/aider-ce/releases/latest" edit_formats = "https://aider.chat/docs/more/edit-formats.html" models_and_keys = "https://aider.chat/docs/troubleshooting/models-and-keys.html" diff --git a/aider/watch.py b/aider/watch.py index 5d0e95a4f87..ec2b98ff905 100644 --- a/aider/watch.py +++ b/aider/watch.py @@ -70,12 +70,11 @@ class FileWatcher: r"(?:#|//|--|;+) *(ai\b.*|ai\b.*|.*\bai[?!]?) 
*$", re.IGNORECASE ) - def __init__(self, coder, gitignores=None, verbose=False, analytics=None, root=None): + def __init__(self, coder, gitignores=None, verbose=False, root=None): self.coder = coder self.io = coder.io self.root = Path(root) if root else Path(coder.root) self.verbose = verbose - self.analytics = analytics self.stop_event = None self.watcher_thread = None self.changed_files = set() @@ -190,8 +189,6 @@ def process_changes(self): if fname in self.coder.abs_fnames: continue - if self.analytics: - self.analytics.event("ai-comments file-add") self.coder.abs_fnames.add(fname) rel_fname = self.coder.get_rel_fname(fname) if not added: @@ -206,8 +203,6 @@ def process_changes(self): ) return "" - if self.analytics: - self.analytics.event("ai-comments execute") self.io.tool_output("Processing your request...") if has_action == "!": diff --git a/aider/website/docs/config/agent-mode.md b/aider/website/docs/config/agent-mode.md index ca9f0d8d039..67f480849bf 100644 --- a/aider/website/docs/config/agent-mode.md +++ b/aider/website/docs/config/agent-mode.md @@ -46,6 +46,7 @@ Agent Mode uses a centralized local tool registry that manages all available too - **Context Management Tools**: `MakeEditable`, `MakeReadonly`, `Remove` - **Git Tools**: `GitDiff`, `GitLog`, `GitShow`, `GitStatus` - **Utility Tools**: `UpdateTodoList`, `ListChanges`, `UndoChange`, `Finished` +- **Skill Management**: `LoadSkill`, `RemoveSkill` #### Enhanced Context Management @@ -175,6 +176,7 @@ The following context blocks are available by default and can be customized usin - **`git_status`**: Shows current git branch, status, and recent commits - **`symbol_outline`**: Lists classes, functions, and methods in current context - **`todo_list`**: Shows the current todo list managed via `UpdateTodoList` tool +- **`skills`**: Include skills content in the conversation When `include_context_blocks` is specified, only the listed blocks will be included. 
When `exclude_context_blocks` is specified, the listed blocks will be removed from the default set. @@ -189,32 +191,47 @@ use-enhanced-map: true ``` -#### Usage Examples +#### Configuration Example -```bash -# Only allow specific tools -aider-ce --agent --agent-config '{"tools_includelist": ["view", "makeeditable", "replacetext", "finished"]}' +Complete configuration example in YAML configuration file (`.aider.conf.yml` or `~/.aider.conf.yml`): -# Exclude specific tools -aider-ce --agent --agent-config '{"tools_excludelist": ["command", "commandinteractive"]}' - -# Custom large file threshold -aider-ce --agent --agent-config '{"large_file_token_threshold": 10000}' +```yaml +# Enable Agent Mode +agent: true -# Custom context blocks configuration -aider-ce --agent --agent-config '{"include_context_blocks": ["directory_structure", "git_status"]}' +# Agent Mode configuration +agent-config: | + { + # Tool configuration + "tools_includelist": ["view", "makeeditable", "replacetext", "finished"], # Optional: Whitelist of tools + "tools_excludelist": ["command", "commandinteractive"], # Optional: Blacklist of tools + + # Context blocks configuration + "include_context_blocks": ["todo_list", "git_status"], # Optional: Context blocks to include + "exclude_context_blocks": ["symbol_outline", "directory_structure"], # Optional: Context blocks to exclude + + # Performance and behavior settings + "large_file_token_threshold": 12500, # Token threshold for large file warnings + "skip_cli_confirmations": false, # YOLO mode - be brave and let the LLM cook + + # Skills configuration (see Skills documentation for details) + "skills_paths": ["~/my-skills", "./project-skills"], # Directories to search for skills + "skills_includelist": ["python-refactoring", "react-components"], # Optional: Whitelist of skills to include + "skills_excludelist": ["legacy-tools"] # Optional: Blacklist of skills to exclude + } + +# Other Agent Mode options +preserve-todo-list: true # Preserve todo list 
across sessions +use-enhanced-map: true # Use enhanced repo map with import relationships +``` -# Exclude specific context blocks -aider-ce --agent --agent-config '{"exclude_context_blocks": ["symbol_outline", "todo_list"]}' +This configuration system allows for fine-grained control over which tools are available in Agent Mode, enabling security-conscious deployments and specialized workflows while maintaining essential functionality. -# Combined configuration -aider-ce --agent --agent-config '{"large_file_token_threshold": 10000, "tools_includelist": ["view", "makeeditable", "replacetext", "finished", "gitdiff"], "include_context_blocks": ["directory_structure", "git_status"]}' +### Skills -# Command Line Options -aider-ce --agent --agent-config '{"large_file_token_threshold": 10000, "tools_includelist": ["view", "makeeditable", "replacetext", "finished", "gitdiff"]}' --preserve-todo-list --use-enhanced-map -``` +Agent Mode includes a powerful skills system that allows you to extend the AI's capabilities with custom instructions, reference materials, scripts, and assets. Skills are configured through the `agent-config` parameter in the YAML configuration file. -This configuration system allows for fine-grained control over which tools are available in Agent Mode, enabling security-conscious deployments and specialized workflows while maintaining essential functionality. +For complete documentation on creating and using skills, including skill directory structure, SKILL.md format, and best practices, see the [Skills documentation](https://github.com/dwash96/aider-ce/blob/main/aider/website/docs/config/skills.md). 
### Benefits diff --git a/aider/website/docs/config/skills.md b/aider/website/docs/config/skills.md new file mode 100644 index 00000000000..504a0b05ba0 --- /dev/null +++ b/aider/website/docs/config/skills.md @@ -0,0 +1,172 @@ +# Skills System + +Agent Mode includes a powerful skills system that allows you to extend the AI's capabilities with custom instructions, reference materials, scripts, and assets. Skills are organized collections of knowledge and tools that help the AI perform specific tasks more effectively. + +## Skill Directory Structure + +Skills follow a standardized directory structure: + +``` +skill-name/ +├── SKILL.md # Main skill definition with YAML frontmatter and instructions +├── references/ # Reference materials (markdown files) +│ └── example-api.md # API documentation +│ └── example-guide.md # Usage guide +├── scripts/ # Executable scripts +│ └── example-setup.sh # Setup script +│ └── example-deploy.py # Deployment script +└── assets/ # Binary assets (images, config files, etc.) + └── example-diagram.png # Architecture diagram + └── example-config.json # Configuration file +``` + +## SKILL.md Format + +The `SKILL.md` file contains YAML frontmatter followed by markdown instructions: + +```yaml +--- +name: python-refactoring +description: Tools and techniques for Python code refactoring +license: MIT +metadata: + version: 1.0.0 + author: AI Team + tags: [python, refactoring, code-quality] +--- + +# Python Refactoring Skill + +This skill provides tools and techniques for refactoring Python code... + +## Common Refactoring Patterns + +1. **Extract Method** - Break down large functions... +2. **Rename Variable** - Improve code readability... +3. **Simplify Conditionals** - Reduce complexity... 
+ +## Usage Examples + +```python +# Before refactoring +def process_data(data): + # Complex logic here + pass + +# After refactoring +def process_data(data): + validate_input(data) + cleaned = clean_data(data) + result = analyze_data(cleaned) + return result +``` +``` + +## Skill Configuration + +Skills are configured through the `agent-config` parameter in the YAML configuration file. The following options are available: + +- **`skills_paths`**: Array of directory paths to search for skills +- **`skills_includelist`**: Array of skill names to include (whitelist) +- **`skills_excludelist`**: Array of skill names to exclude (blacklist) + +Complete configuration example in YAML configuration file (`.aider.conf.yml` or `~/.aider.conf.yml`): + +```yaml +# Enable Agent Mode +agent: true + +# Agent Mode configuration +agent-config: | + { + # Skills configuration + "skills_paths": ["~/my-skills", "./project-skills"], # Directories to search for skills + "skills_includelist": ["python-refactoring", "react-components"], # Optional: Whitelist of skills to include + "skills_excludelist": ["legacy-tools"], # Optional: Blacklist of skills to exclude + + # Other Agent Mode settings + "large_file_token_threshold": 12500, # Token threshold for large file warnings + "skip_cli_confirmations": false, # YOLO mode - be brave and let the LLM cook + "tools_includelist": ["view", "makeeditable", "replacetext", "finished"], # Optional: Whitelist of tools + "tools_excludelist": ["command", "commandinteractive"], # Optional: Blacklist of tools + "include_context_blocks": ["todo_list", "git_status"], # Optional: Context blocks to include + "exclude_context_blocks": ["symbol_outline", "directory_structure"] # Optional: Context blocks to exclude + } +``` + +## Creating Custom Skills + +To create a custom skill: + +1. Create a skill directory with the skill name +2. Add `SKILL.md` with YAML frontmatter and instructions +3. Add reference materials in `references/` directory +4. 
Add executable scripts in `scripts/` directory +5. Add binary assets in `assets/` directory +6. Test the skill by adding it to your configuration file: + +Example skill creation: +```bash +mkdir -p ~/skills/my-custom-skill/{references,scripts,assets} + +cat > ~/skills/my-custom-skill/SKILL.md << 'EOF' +--- +name: my-custom-skill +description: My custom skill for specific tasks +license: MIT +metadata: + version: 1.0.0 + author: Your Name +--- + +# My Custom Skill + +This skill helps with... + +## Features +- Feature 1 +- Feature 2 + +## Usage +1. Step 1 +2. Step 2 +EOF + +# Add a reference +cat > ~/skills/my-custom-skill/references/api.md << 'EOF' +# API Reference + +## Endpoints +- GET /api/data +- POST /api/process +EOF + +# Add a script +cat > ~/skills/my-custom-skill/scripts/setup.sh << 'EOF' +#!/bin/bash +echo "Setting up my custom skill..." +# Setup commands here +EOF +chmod +x ~/skills/my-custom-skill/scripts/setup.sh +``` + +## Best Practices for Skills + +1. **Keep skills focused**: Each skill should address a specific domain or task +2. **Provide clear instructions**: Write comprehensive, well-structured documentation +3. **Include examples**: Show practical usage examples +4. **Test scripts**: Ensure scripts work correctly and handle errors +5. **Version skills**: Use metadata to track skill versions +6. **License appropriately**: Specify licenses for reusable skills +7. **Organize references**: Structure reference materials logically + +## Skills in Action + +With skills enabled, the AI can: +- Reference specific techniques from skill instructions +- Use provided scripts to automate tasks +- Consult reference materials for API details +- Follow established patterns and best practices +- Combine multiple skills for complex tasks + +Skills transform Agent Mode from a general-purpose coding assistant into a domain-specific expert with access to curated knowledge and tools. 
diff --git a/requirements/requirements.in b/requirements/requirements.in index 02bfc83d0f3..4ab86dcebac 100644 --- a/requirements/requirements.in +++ b/requirements/requirements.in @@ -19,8 +19,6 @@ litellm>=1.75.0 flake8>=7.3.0 importlib_resources pyperclip>=1.9.0 -posthog>=6.4.1 -mixpanel>=4.10.1 pexpect>=4.9.0 json5>=0.12.0 psutil>=7.0.0 diff --git a/scripts/my_models.py b/scripts/my_models.py deleted file mode 100755 index 748c592fbf5..00000000000 --- a/scripts/my_models.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -import json -from collections import defaultdict, deque -from pathlib import Path - - -def collect_model_stats(n_lines=1000): - """Collect model usage statistics from the analytics file.""" - analytics_path = Path.home() / ".aider" / "analytics.jsonl" - model_stats = defaultdict(int) - - with open(analytics_path) as f: - lines = deque(f, n_lines) - for line in lines: - try: - event = json.loads(line) - if event["event"] == "message_send": - properties = event["properties"] - main_model = properties.get("main_model") - - total_tokens = properties.get("total_tokens", 0) - if main_model == "deepseek/deepseek-coder": - main_model = "deepseek/deepseek-chat" - if main_model: - model_stats[main_model] += total_tokens - except json.JSONDecodeError: - continue - - return model_stats - - -def format_text_table(model_stats): - """Format model statistics as a text table.""" - total_tokens = sum(model_stats.values()) - lines = [] - - lines.append("\nModel Token Usage Summary:") - lines.append("-" * 80) - lines.append(f"{'Model Name':<40} {'Total Tokens':>15} {'Percent':>10}") - lines.append("-" * 80) - - for model, tokens in sorted(model_stats.items(), key=lambda x: x[1], reverse=True): - percentage = (tokens / total_tokens) * 100 if total_tokens > 0 else 0 - lines.append(f"{model:<40} {tokens:>15,} {percentage:>9.1f}%") - - lines.append("-" * 80) - lines.append(f"{'TOTAL':<40} {total_tokens:>15,} {100:>9.1f}%") - - return "\n".join(lines) - - 
-def format_html_table(model_stats): - """Format model statistics as an HTML table.""" - total_tokens = sum(model_stats.values()) - - html = [ - "", - "", - ( - "Percent" - ), - ] - - for model, tokens in sorted(model_stats.items(), key=lambda x: x[1], reverse=True): - percentage = (tokens / total_tokens) * 100 if total_tokens > 0 else 0 - html.append( - f"" - f"" - f"" - ) - - html.append("
Model NameTotal Tokens
{model}{tokens:,}{percentage:.1f}%
") - - # Add note about redacted models if any are present - if any("REDACTED" in model for model in model_stats.keys()): - html.extend( - [ - "", - "{: .note :}", - "Some models show as REDACTED, because they are new or unpopular models.", - 'Aider\'s analytics only records the names of "well known" LLMs.', - ] - ) - - return "\n".join(html) - - -if __name__ == "__main__": - stats = collect_model_stats() - print(format_text_table(stats)) diff --git a/tests/basic/test_analytics.py b/tests/basic/test_analytics.py deleted file mode 100644 index e3178ee30ee..00000000000 --- a/tests/basic/test_analytics.py +++ /dev/null @@ -1,136 +0,0 @@ -import json -import os -import tempfile -from pathlib import Path -from unittest.mock import patch - -import pytest - -from aider.analytics import Analytics - - -@pytest.fixture -def temp_analytics_file(): - with tempfile.NamedTemporaryFile(delete=False) as f: - yield f.name - os.unlink(f.name) - - -@pytest.fixture -def temp_data_dir(monkeypatch): - with tempfile.TemporaryDirectory() as tmpdir: - temp_dir = Path(tmpdir) - monkeypatch.setattr(Path, "home", lambda: temp_dir) - yield temp_dir - - -def test_analytics_initialization(temp_data_dir): - analytics = Analytics(permanently_disable=True) - assert analytics.mp is None - assert analytics.ph is None - assert analytics.permanently_disable is True - assert analytics.user_id is not None - - -def test_analytics_enable_disable(temp_data_dir): - analytics = Analytics() - analytics.asked_opt_in = True - - analytics.enable() - # assert analytics.mp is not None - assert analytics.ph is not None - - analytics.disable(permanently=False) - assert analytics.mp is None - assert analytics.ph is None - assert analytics.permanently_disable is not True - - analytics.disable(permanently=True) - assert analytics.permanently_disable is True - - -def test_analytics_data_persistence(temp_data_dir): - analytics1 = Analytics() - user_id = analytics1.user_id - - analytics2 = Analytics() - assert 
analytics2.user_id == user_id - - -def test_analytics_event_logging(temp_analytics_file, temp_data_dir): - analytics = Analytics(logfile=temp_analytics_file) - analytics.asked_opt_in = True - analytics.enable() - - test_event = "test_event" - test_properties = {"test_key": "test_value"} - - # with patch.object(analytics.mp, "track") as mock_mp_track: - with patch.object(analytics.ph, "capture") as mock_ph_capture: - analytics.event(test_event, **test_properties) - - # mock_mp_track.assert_called_once() - mock_ph_capture.assert_called_once() - - # Verify logfile - with open(temp_analytics_file) as f: - log_entry = json.loads(f.read().strip()) - assert log_entry["event"] == test_event - assert "test_key" in log_entry["properties"] - - -def test_system_info(temp_data_dir): - analytics = Analytics() - sys_info = analytics.get_system_info() - - assert "python_version" in sys_info - assert "os_platform" in sys_info - assert "os_release" in sys_info - assert "machine" in sys_info - - -def test_need_to_ask(temp_data_dir): - analytics = Analytics() - assert analytics.need_to_ask(True) is True - assert analytics.need_to_ask(False) is False - - analytics.user_id = "000" - assert analytics.need_to_ask(None) is True - - analytics.asked_opt_in = True - assert analytics.need_to_ask(True) is False - - analytics.permanently_disable = True - assert analytics.need_to_ask(True) is False - - -def test_is_uuid_in_percentage(): - from aider.analytics import is_uuid_in_percentage - - # Test basic percentage thresholds - assert is_uuid_in_percentage("00000000000000000000000000000000", 1) is True - assert is_uuid_in_percentage("01999000000000000000000000000000", 1) is True - assert is_uuid_in_percentage("02000000000000000000000000000000", 1) is True - assert is_uuid_in_percentage("02910000000000000000000000000001", 1) is False - assert is_uuid_in_percentage("03000000000000000000000000000000", 1) is False - assert is_uuid_in_percentage("ff000000000000000000000000000000", 1) is False - - 
assert is_uuid_in_percentage("00000000000000000000000000000000", 10) is True - assert is_uuid_in_percentage("19000000000000000000000000000000", 10) is True - assert is_uuid_in_percentage("1a000000000000000000000000000000", 10) is False - assert is_uuid_in_percentage("ff000000000000000000000000000000", 10) is False - - # Test edge cases - assert is_uuid_in_percentage("00000000000000000000000000000000", 0) is False - assert is_uuid_in_percentage("00000000000000000000000000000000", 100) is True - assert is_uuid_in_percentage("ffffffffffffffffffffffffffffffff", 100) is True - - # Test invalid inputs - with pytest.raises(ValueError): - is_uuid_in_percentage("00000000000000000000000000000000", -1) - with pytest.raises(ValueError): - is_uuid_in_percentage("00000000000000000000000000000000", 101) - - # Test empty/None UUID - assert is_uuid_in_percentage("", 50) is False - assert is_uuid_in_percentage(None, 50) is False diff --git a/tests/basic/test_onboarding.py b/tests/basic/test_onboarding.py index b5b63412e8a..337efe1ca1e 100644 --- a/tests/basic/test_onboarding.py +++ b/tests/basic/test_onboarding.py @@ -19,13 +19,6 @@ ) -# Mock the Analytics class as it's used in some functions -class DummyAnalytics: - def event(self, *args, **kwargs): - pass - - -# Mock the InputOutput class class DummyIO: def tool_output(self, *args, **kwargs): pass @@ -292,8 +285,7 @@ async def test_select_default_model_already_specified(self, mock_offer_oauth, mo """Test select_default_model returns args.model if provided.""" args = argparse.Namespace(model="specific-model") io_mock = DummyIO() - analytics_mock = DummyAnalytics() - selected_model = await select_default_model(args, io_mock, analytics_mock) + selected_model = await select_default_model(args, io_mock) self.assertEqual(selected_model, "specific-model") mock_try_select.assert_not_called() mock_offer_oauth.assert_not_called() @@ -305,17 +297,14 @@ async def test_select_default_model_found_via_env(self, mock_offer_oauth, mock_t args = 
argparse.Namespace(model=None) # No model specified io_mock = DummyIO() io_mock.tool_warning = MagicMock() # Track warnings - analytics_mock = DummyAnalytics() - analytics_mock.event = MagicMock() # Track events - selected_model = await select_default_model(args, io_mock, analytics_mock) + selected_model = await select_default_model(args, io_mock) self.assertEqual(selected_model, "gpt-4o") mock_try_select.assert_called_once() io_mock.tool_warning.assert_called_once_with( "Using gpt-4o model with API key from environment." ) - analytics_mock.event.assert_called_once_with("auto_model_selection", model="gpt-4o") mock_offer_oauth.assert_not_called() @patch( @@ -330,13 +319,12 @@ async def test_select_default_model_no_keys_oauth_fail(self, mock_offer_oauth, m io_mock = DummyIO() io_mock.tool_warning = MagicMock() io_mock.offer_url = MagicMock() - analytics_mock = DummyAnalytics() - selected_model = await select_default_model(args, io_mock, analytics_mock) + selected_model = await select_default_model(args, io_mock) self.assertIsNone(selected_model) self.assertEqual(mock_try_select.call_count, 2) # Called before and after oauth attempt - mock_offer_oauth.assert_called_once_with(io_mock, analytics_mock) + mock_offer_oauth.assert_called_once_with(io_mock) io_mock.tool_warning.assert_called_once_with( "No LLM model was specified and no API keys were provided." 
) @@ -356,13 +344,12 @@ async def test_select_default_model_no_keys_oauth_success( args = argparse.Namespace(model=None) io_mock = DummyIO() io_mock.tool_warning = MagicMock() - analytics_mock = DummyAnalytics() - selected_model = await select_default_model(args, io_mock, analytics_mock) + selected_model = await select_default_model(args, io_mock) self.assertEqual(selected_model, "openrouter/deepseek/deepseek-r1:free") self.assertEqual(mock_try_select.call_count, 2) # Called before and after oauth - mock_offer_oauth.assert_called_once_with(io_mock, analytics_mock) + mock_offer_oauth.assert_called_once_with(io_mock) # Only one warning is expected: "No LLM model..." self.assertEqual(io_mock.tool_warning.call_count, 1) io_mock.tool_warning.assert_called_once_with( @@ -380,17 +367,13 @@ async def test_offer_openrouter_oauth_confirm_yes_success(self, mock_start_oauth """Test offer_openrouter_oauth when user confirms and OAuth succeeds.""" io_mock = DummyIO() io_mock.confirm_ask = MagicMock(return_value=True) # User says yes - analytics_mock = DummyAnalytics() - analytics_mock.event = MagicMock() - result = await offer_openrouter_oauth(io_mock, analytics_mock) + result = await offer_openrouter_oauth(io_mock) self.assertTrue(result) io_mock.confirm_ask.assert_called_once() - mock_start_oauth.assert_called_once_with(io_mock, analytics_mock) + mock_start_oauth.assert_called_once_with(io_mock) self.assertEqual(os.environ.get("OPENROUTER_API_KEY"), "new_or_key") - analytics_mock.event.assert_any_call("oauth_flow_initiated", provider="openrouter") - analytics_mock.event.assert_any_call("oauth_flow_success") # Clean up env var del os.environ["OPENROUTER_API_KEY"] @@ -401,35 +384,28 @@ async def test_offer_openrouter_oauth_confirm_yes_fail(self, mock_start_oauth): io_mock = DummyIO() io_mock.confirm_ask = MagicMock(return_value=True) # User says yes io_mock.tool_error = MagicMock() - analytics_mock = DummyAnalytics() - analytics_mock.event = MagicMock() - result = await 
offer_openrouter_oauth(io_mock, analytics_mock) + result = await offer_openrouter_oauth(io_mock) self.assertFalse(result) io_mock.confirm_ask.assert_called_once() - mock_start_oauth.assert_called_once_with(io_mock, analytics_mock) + mock_start_oauth.assert_called_once_with(io_mock) self.assertNotIn("OPENROUTER_API_KEY", os.environ) io_mock.tool_error.assert_called_once_with( "OpenRouter authentication did not complete successfully." ) - analytics_mock.event.assert_any_call("oauth_flow_initiated", provider="openrouter") - analytics_mock.event.assert_any_call("oauth_flow_failure") @patch("aider.onboarding.start_openrouter_oauth_flow") async def test_offer_openrouter_oauth_confirm_no(self, mock_start_oauth): """Test offer_openrouter_oauth when user declines.""" io_mock = DummyIO() io_mock.confirm_ask = MagicMock(return_value=False) # User says no - analytics_mock = DummyAnalytics() - analytics_mock.event = MagicMock() - result = await offer_openrouter_oauth(io_mock, analytics_mock) + result = await offer_openrouter_oauth(io_mock) self.assertFalse(result) io_mock.confirm_ask.assert_called_once() mock_start_oauth.assert_not_called() - analytics_mock.event.assert_not_called() # No OAuth events if declined # --- More complex test for start_openrouter_oauth_flow (simplified) --- # This test focuses on the successful path, mocking heavily diff --git a/tests/basic/test_skills.py b/tests/basic/test_skills.py new file mode 100644 index 00000000000..159f344e5c1 --- /dev/null +++ b/tests/basic/test_skills.py @@ -0,0 +1,537 @@ +""" +Tests for aider/helpers/skills.py +""" + +import os +import tempfile +import unittest +from pathlib import Path +from unittest.mock import MagicMock + +from aider.helpers.skills import SkillsManager + + +class TestSkills(unittest.TestCase): + """Test suite for skills helper module.""" + + def setUp(self): + """Set up test fixtures.""" + self.temp_dir = tempfile.mkdtemp() + + def tearDown(self): + """Clean up test fixtures.""" + import shutil + + if 
os.path.exists(self.temp_dir): + shutil.rmtree(self.temp_dir) + + def test_skills_manager_initialization(self): + """Test that SkillsManager initializes correctly.""" + # Test with empty directory paths + manager = SkillsManager([]) + self.assertEqual(manager.directory_paths, []) + self.assertIsNone(manager.include_list) + self.assertEqual(manager.exclude_list, set()) + self.assertIsNone(manager.git_root) + # Test _loaded_skills is initialized as empty set + self.assertEqual(manager._loaded_skills, set()) + + # Test with directory paths + manager = SkillsManager(["/tmp/test"]) + self.assertEqual(len(manager.directory_paths), 1) + self.assertIsInstance(manager.directory_paths[0], Path) + self.assertEqual(manager._loaded_skills, set()) + + # Test with include/exclude lists + manager = SkillsManager( + ["/tmp/test"], + include_list=["skill1", "skill2"], + exclude_list=["skill3"], + git_root="/tmp", + ) + self.assertEqual(manager.include_list, {"skill1", "skill2"}) + self.assertEqual(manager.exclude_list, {"skill3"}) + self.assertEqual(manager.git_root, Path("/tmp").expanduser().resolve()) + self.assertEqual(manager._loaded_skills, set()) + + def test_create_and_parse_skill(self): + """Test creating a skill and parsing its metadata.""" + # Create a skill directory structure + skill_dir = Path(self.temp_dir) / "test-skill" + skill_dir.mkdir() + + # Create SKILL.md with proper format + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""--- +name: test-skill +description: A test skill +--- + +# Test Skill + +These are the main instructions. 
+""") + + # Create references directory + ref_dir = skill_dir / "references" + ref_dir.mkdir() + (ref_dir / "api.md").write_text("# API Documentation") + + # Create scripts directory + scripts_dir = skill_dir / "scripts" + scripts_dir.mkdir() + (scripts_dir / "setup.sh").write_text("#!/bin/bash\necho 'Setup script'") + + # Create assets directory + assets_dir = skill_dir / "assets" + assets_dir.mkdir() + (assets_dir / "icon.png").write_bytes(b"fake_png_data") + + # Test loading the complete skill + manager = SkillsManager([self.temp_dir]) + skill_content = manager.get_skill_content("test-skill") + + self.assertIsNotNone(skill_content) + self.assertEqual(skill_content.metadata.name, "test-skill") + self.assertEqual(skill_content.metadata.description, "A test skill") + self.assertEqual( + skill_content.instructions, "# Test Skill\n\nThese are the main instructions." + ) + + # Check references - should be Path objects + self.assertEqual(len(skill_content.references), 1) + self.assertIn("api.md", skill_content.references) + self.assertIsInstance(skill_content.references["api.md"], Path) + self.assertEqual(skill_content.references["api.md"].name, "api.md") + + # Check scripts - should be Path objects + self.assertEqual(len(skill_content.scripts), 1) + self.assertIn("setup.sh", skill_content.scripts) + self.assertIsInstance(skill_content.scripts["setup.sh"], Path) + self.assertEqual(skill_content.scripts["setup.sh"].name, "setup.sh") + + # Check assets - should be Path objects + self.assertEqual(len(skill_content.assets), 1) + self.assertIn("icon.png", skill_content.assets) + self.assertIsInstance(skill_content.assets["icon.png"], Path) + self.assertEqual(skill_content.assets["icon.png"].name, "icon.png") + + # Test that skill was NOT added to _loaded_skills (only load_skill() does that) + self.assertNotIn("test-skill", manager._loaded_skills) + self.assertEqual(manager._loaded_skills, set()) + + def test_skill_summary_loader(self): + """Test the skill_summary_loader 
function.""" + # Create a skill directory structure + skill_dir = Path(self.temp_dir) / "test-skill" + skill_dir.mkdir() + + # Create SKILL.md + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""--- +name: test-skill +description: A test skill for validation +--- + +# Test Skill + +Test content. +""") + # Test the skill summary loader (class method) + summary = SkillsManager.skill_summary_loader([self.temp_dir]) + + # Check that the summary contains expected information + self.assertIn("Found 1 skill(s)", summary) + self.assertIn("Skill: test-skill", summary) + self.assertIn("Description: A test skill for validation", summary) + + # Test with include list + summary = SkillsManager.skill_summary_loader([self.temp_dir], include_list=["test-skill"]) + self.assertIn("Found 1 skill(s)", summary) + + # Test with exclude list + summary = SkillsManager.skill_summary_loader([self.temp_dir], exclude_list=["test-skill"]) + self.assertIn("No skills found", summary) + + def test_resolve_skill_directories(self): + """Test the resolve_skill_directories function.""" + # Test with absolute path + paths = SkillsManager.resolve_skill_directories([self.temp_dir]) + self.assertEqual(len(paths), 1) + self.assertEqual(paths[0], Path(self.temp_dir).resolve()) + + # Test with relative path and git root + paths = SkillsManager.resolve_skill_directories(["./test-dir"], git_root=self.temp_dir) + # Should not resolve because directory doesn't exist + self.assertEqual(len(paths), 0) + + # Create the directory and test again + test_dir = Path(self.temp_dir) / "test-dir" + test_dir.mkdir() + paths = SkillsManager.resolve_skill_directories(["./test-dir"], git_root=self.temp_dir) + self.assertEqual(len(paths), 1) + self.assertEqual(paths[0], test_dir.resolve()) + + # Test with non-existent path + paths = SkillsManager.resolve_skill_directories(["/non-existent/path"]) + self.assertEqual(len(paths), 0) + + def test_remove_skill(self): + """Test the remove_skill instance method.""" + # Create 
a skill directory structure + skill_dir = Path(self.temp_dir) / "test-skill" + skill_dir.mkdir() + + # Create SKILL.md + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""--- +name: test-skill +description: A test skill +--- + +# Test Skill + +Test content. +""") + + # Create a mock coder with agent mode + mock_coder = MagicMock() + mock_coder.edit_format = "agent" + mock_coder.skills_includelist = [] + mock_coder.skills_excludelist = [] + + # Create skills manager with coder reference + manager = SkillsManager([self.temp_dir], coder=mock_coder) + + # First add the skill + result = manager.load_skill("test-skill") + self.assertIn("Skill 'test-skill' loaded successfully", result) + self.assertIn("test-skill", manager._loaded_skills) + + # Test removing a skill that exists + result = manager.remove_skill("test-skill") + self.assertEqual("Skill 'test-skill' removed successfully.", result) + self.assertNotIn("test-skill", manager._loaded_skills) + + # Test removing the same skill again (should say not loaded) + result = manager.remove_skill("test-skill") + self.assertEqual("Skill 'test-skill' is not loaded.", result) + + # Test removing a skill not in include list (but not loaded) + mock_coder2 = MagicMock() + mock_coder2.edit_format = "agent" + mock_coder2.skills_includelist = [] + mock_coder2.skills_excludelist = [] + + manager2 = SkillsManager([self.temp_dir], coder=mock_coder2) + result = manager2.remove_skill("test-skill") + self.assertEqual("Skill 'test-skill' is not loaded.", result) + + # Test without coder reference + manager_no_coder = SkillsManager([self.temp_dir]) + result = manager_no_coder.remove_skill("test-skill") + self.assertEqual("Error: Skills manager not connected to a coder instance.", result) + + # Test not in agent mode + mock_coder3 = MagicMock() + mock_coder3.edit_format = "other-mode" + mock_coder3.skills_includelist = ["test-skill"] + mock_coder3.skills_excludelist = [] + + manager3 = SkillsManager([self.temp_dir], 
coder=mock_coder3) + result = manager3.remove_skill("test-skill") + self.assertEqual("Error: Skill removal is only available in agent mode.", result) + + # Test with empty skill name + mock_coder4 = MagicMock() + mock_coder4.edit_format = "agent" + mock_coder4.skills_includelist = [] + mock_coder4.skills_excludelist = [] + + manager4 = SkillsManager([self.temp_dir], coder=mock_coder4) + result = manager4.remove_skill("") + self.assertEqual("Error: Skill name is required.", result) + + def test_load_skill(self): + """Test the add_skill instance method.""" + # Create a skill directory structure + skill_dir = Path(self.temp_dir) / "test-skill" + skill_dir.mkdir() + + # Create SKILL.md + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""--- +name: test-skill +description: A test skill +--- + +# Test Skill + +Test content. +""") + + # Create a mock coder with agent mode + mock_coder = MagicMock() + mock_coder.edit_format = "agent" + mock_coder.skills_includelist = [] + mock_coder.skills_excludelist = [] + + # Create skills manager with coder reference + manager = SkillsManager([self.temp_dir], coder=mock_coder) + + # Test adding a skill that exists + result = manager.load_skill("test-skill") + self.assertIn("Skill 'test-skill' loaded successfully", result) + self.assertIn("test-skill", manager._loaded_skills) + + # Test adding the same skill again (should say already loaded) + result = manager.load_skill("test-skill") + self.assertIn("Skill 'test-skill' is already loaded", result) + + # Test adding a non-existent skill + result = manager.load_skill("non-existent-skill") + self.assertIn( + "Error: Skill 'non-existent-skill' not found in configured directories.", result + ) + self.assertNotIn("non-existent-skill", manager._loaded_skills) + + # Test with skill in exclude list (should still work since add_skill doesn't check exclude list) + mock_coder2 = MagicMock() + mock_coder2.edit_format = "agent" + mock_coder2.skills_includelist = [] + 
mock_coder2.skills_excludelist = ["test-skill"] + + manager2 = SkillsManager([self.temp_dir], coder=mock_coder2) + result = manager2.load_skill("test-skill") + self.assertIn("Skill 'test-skill' loaded successfully", result) + self.assertIn("test-skill", manager2._loaded_skills) + + # Test without coder reference + manager_no_coder = SkillsManager([self.temp_dir]) + result = manager_no_coder.load_skill("test-skill") + self.assertEqual("Error: Skills manager not connected to a coder instance.", result) + + # Test not in agent mode + mock_coder3 = MagicMock() + mock_coder3.edit_format = "other-mode" + mock_coder3.skills_includelist = [] + mock_coder3.skills_excludelist = [] + + manager3 = SkillsManager([self.temp_dir], coder=mock_coder3) + result = manager3.load_skill("test-skill") + self.assertEqual("Error: Skill loading is only available in agent mode.", result) + + def test_get_skill_content_does_not_add_to_loaded_skills(self): + """Test that get_skill_content() does NOT add to _loaded_skills.""" + # Create two skill directory structures + skill_dir1 = Path(self.temp_dir) / "skill1" + skill_dir1.mkdir() + skill_md1 = skill_dir1 / "SKILL.md" + skill_md1.write_text("""--- +name: skill1 +description: First test skill +--- + +# Skill 1 + +Test content. +""") + + skill_dir2 = Path(self.temp_dir) / "skill2" + skill_dir2.mkdir() + skill_md2 = skill_dir2 / "SKILL.md" + skill_md2.write_text("""--- +name: skill2 +description: Second test skill +--- + +# Skill 2 + +Test content. 
+""") + + # Create skills manager + manager = SkillsManager([self.temp_dir]) + + # Test initial state + self.assertEqual(manager._loaded_skills, set()) + + # Get first skill content + skill1 = manager.get_skill_content("skill1") + self.assertIsNotNone(skill1) + self.assertEqual(manager._loaded_skills, set()) # Should NOT be added + + # Get second skill content + skill2 = manager.get_skill_content("skill2") + self.assertIsNotNone(skill2) + self.assertEqual(manager._loaded_skills, set()) # Should NOT be added + + # Get non-existent skill (should not add to _loaded_skills) + skill3 = manager.get_skill_content("nonexistent") + self.assertIsNone(skill3) + self.assertEqual(manager._loaded_skills, set()) + + # Get same skill again (should not add to _loaded_skills) + skill1_again = manager.get_skill_content("skill1") + self.assertIsNotNone(skill1_again) + self.assertEqual(manager._loaded_skills, set()) + + def test_get_skills_content_only_returns_loaded_skills(self): + """Test that get_skills_content() only returns skills in _loaded_skills.""" + # Create two skill directory structures + skill_dir1 = Path(self.temp_dir) / "skill1" + skill_dir1.mkdir() + skill_md1 = skill_dir1 / "SKILL.md" + skill_md1.write_text("""--- +name: skill1 +description: First test skill +--- + +# Skill 1 + +Test content. +""") + + skill_dir2 = Path(self.temp_dir) / "skill2" + skill_dir2.mkdir() + skill_md2 = skill_dir2 / "SKILL.md" + skill_md2.write_text("""--- +name: skill2 +description: Second test skill +--- + +# Skill 2 + +Test content. 
+""") + + # Create skills manager + manager = SkillsManager([self.temp_dir]) + + # Test with no loaded skills + content = manager.get_skills_content() + self.assertIsNone(content) + + # Load only skill1 via load_skill() (requires mock coder) + mock_coder = MagicMock() + mock_coder.edit_format = "agent" + mock_coder.skills_includelist = [] + mock_coder.skills_excludelist = [] + manager.coder = mock_coder + + result = manager.load_skill("skill1") + self.assertIn("Skill 'skill1' loaded successfully", result) + content = manager.get_skills_content() + self.assertIsNotNone(content) + self.assertIn("skill1", content) + self.assertNotIn("skill2", content) + + # Load skill2 as well + result = manager.load_skill("skill2") + self.assertIn("Skill 'skill2' loaded successfully", result) + content = manager.get_skills_content() + self.assertIsNotNone(content) + self.assertIn("skill1", content) + self.assertIn("skill2", content) + + def test_add_skill_updates_loaded_skills(self): + """Test that load_skill() updates _loaded_skills.""" + # Create a skill directory structure + skill_dir = Path(self.temp_dir) / "test-skill" + skill_dir.mkdir() + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""--- +name: test-skill +description: A test skill +--- + +# Test Skill + +Test content. 
+""") + + # Create a mock coder with agent mode + mock_coder = MagicMock() + mock_coder.edit_format = "agent" + mock_coder.skills_includelist = [] + mock_coder.skills_excludelist = [] + + # Create skills manager + manager = SkillsManager([self.temp_dir], coder=mock_coder) + + # Test initial state + self.assertEqual(manager._loaded_skills, set()) + + # Add skill via load_skill() (simulating /load-skill command) + result = manager.load_skill("test-skill") + self.assertIn("Skill 'test-skill' loaded successfully", result) + self.assertIn("test-skill", manager._loaded_skills) + + # Test get_skills_content returns the skill + content = manager.get_skills_content() + self.assertIsNotNone(content) + self.assertIn("test-skill", content) + + def test_remove_skill_updates_loaded_skills(self): + """Test that remove_skill() updates _loaded_skills.""" + # Create a skill directory structure + skill_dir = Path(self.temp_dir) / "test-skill" + skill_dir.mkdir() + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""--- +name: test-skill +description: A test skill +--- + +# Test Skill + +Test content. 
+""") + + # Create a mock coder with agent mode + mock_coder = MagicMock() + mock_coder.edit_format = "agent" + mock_coder.skills_includelist = [] + mock_coder.skills_excludelist = [] + + # Create skills manager and load the skill first via load_skill() + manager = SkillsManager([self.temp_dir], coder=mock_coder) + result = manager.load_skill("test-skill") + self.assertIn("Skill 'test-skill' loaded successfully", result) + self.assertIn("test-skill", manager._loaded_skills) + + # Remove the skill + result = manager.remove_skill("test-skill") + self.assertEqual("Skill 'test-skill' removed successfully.", result) + self.assertNotIn("test-skill", manager._loaded_skills) + + # Test get_skills_content returns None + content = manager.get_skills_content() + self.assertIsNone(content) + + def test_skill_not_loaded_when_get_skill_content_fails(self): + """Test that skill is not added to _loaded_skills when get_skill_content() fails.""" + # Create a skill directory structure with invalid SKILL.md (no frontmatter) + skill_dir = Path(self.temp_dir) / "invalid-skill" + skill_dir.mkdir() + skill_md = skill_dir / "SKILL.md" + skill_md.write_text("""# Invalid Skill + +No frontmatter, so get_skill_content() should fail. 
+""") + + # Create skills manager + manager = SkillsManager([self.temp_dir]) + + # Try to get invalid skill content + skill = manager.get_skill_content("invalid-skill") + self.assertIsNone(skill) + self.assertEqual(manager._loaded_skills, set()) + + # Test get_skills_content returns None + content = manager.get_skills_content() + self.assertIsNone(content) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/browser/test_browser.py b/tests/browser/test_browser.py deleted file mode 100644 index c21dfa8cb41..00000000000 --- a/tests/browser/test_browser.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import unittest -from unittest.mock import patch - -from aider.main import main - - -class TestBrowser(unittest.TestCase): - @patch("aider.main.launch_gui") - def test_browser_flag_imports_streamlit(self, mock_launch_gui): - os.environ["AIDER_ANALYTICS"] = "false" - - # Run main with --browser and --yes flags - main(["--browser", "--yes"]) - - # Check that launch_gui was called - mock_launch_gui.assert_called_once() - - # Try to import streamlit - try: - import streamlit # noqa: F401 - - streamlit_imported = True - except ImportError: - streamlit_imported = False - - # Assert that streamlit was successfully imported - self.assertTrue( - streamlit_imported, "Streamlit should be importable after running with --browser flag" - ) - - -if __name__ == "__main__": - unittest.main()