2 changes: 2 additions & 0 deletions .gitignore
@@ -33,3 +33,5 @@
aider/__version__.py
aider/_version.py
*.pyc
.aider*
env/
2 changes: 1 addition & 1 deletion aider/__init__.py
@@ -1,6 +1,6 @@
from packaging import version

__version__ = "0.88.28.dev"
__version__ = "0.88.29.dev"
safe_version = __version__

try:
86 changes: 83 additions & 3 deletions aider/coders/agent_coder.py
@@ -52,6 +52,7 @@
replace_lines,
replace_text,
show_numbered_context,
thinking,
undo_change,
update_todo_list,
view,
@@ -182,6 +183,7 @@ def _build_tool_registry(self):
replace_lines,
replace_text,
show_numbered_context,
thinking,
undo_change,
update_todo_list,
view,
@@ -1165,6 +1167,73 @@ async def _execute_tool_with_registry(self, norm_tool_name, params):

return f"Error: Unknown tool name '{norm_tool_name}'"

def _convert_concatenated_json_to_tool_calls(self, content):
"""
Check if content contains concatenated JSON objects and convert them to tool call format.

Args:
content (str): Content to check for concatenated JSON

Returns:
str: Content with concatenated JSON converted to tool call format, or original content if no JSON found
"""
try:
# Use split_concatenated_json to detect and split concatenated JSON objects
json_chunks = utils.split_concatenated_json(content)

# If we found at least one JSON object, try to convert to tool call format
if len(json_chunks) >= 1:
tool_calls = []
for chunk in json_chunks:
try:
json_obj = json.loads(chunk)
# Check if this looks like a tool call JSON object
if (
isinstance(json_obj, dict)
and "name" in json_obj
and "arguments" in json_obj
):
tool_name = json_obj["name"]
arguments = json_obj["arguments"]

# Convert arguments dictionary to keyword arguments string
kw_args = []
for key, value in arguments.items():
if isinstance(value, str):
# Escape quotes and wrap in quotes
escaped_value = value.replace('"', '\\"')
kw_args.append(f'{key}="{escaped_value}"')
elif isinstance(value, bool):
kw_args.append(f"{key}={str(value).lower()}")
elif value is None:
kw_args.append(f"{key}=None")
else:
# For numbers and other types, use repr for safe representation
kw_args.append(f"{key}={repr(value)}")

# Join keyword arguments
kw_args_str = ", ".join(kw_args)

# Convert to [tool_call(ToolName, key1="value1", key2="value2")] format
tool_call = f"[tool_call({tool_name}, {kw_args_str})]"
tool_calls.append(tool_call)
else:
# Not a tool call JSON, keep as is
tool_calls.append(chunk)
except json.JSONDecodeError:
# Invalid JSON, keep as is
tool_calls.append(chunk)

# If we found any tool calls, replace the content
if any(call.startswith("[tool_") for call in tool_calls):
return "".join(tool_calls)

except Exception as e:
# If anything goes wrong, return original content
self.io.tool_warning(f"Error converting concatenated JSON to tool calls: {str(e)}")

return content

async def _process_tool_commands(self, content):
"""
Process tool commands in the `[tool_call(name, param=value)]` format within the content.
@@ -1184,20 +1253,31 @@ async def _process_tool_commands(self, content):
max_calls = self.max_tool_calls
tool_names = []

# Check if content contains concatenated JSON and convert to tool call format
content = self._convert_concatenated_json_to_tool_calls(content)

# Check if there's a '---' separator and only process tool calls after the LAST one
separator_marker = "---"
content_parts = content.split(separator_marker)

# If there's no separator, treat the entire content as before the separator
# But only return immediately if no tool calls were found in the JSON conversion
if len(content_parts) == 1:
# Return the original content with no tool calls processed, and the content itself as before_separator
return content, result_messages, False, content, tool_names
# Check if we have any tool calls in the content after JSON conversion
# If we have tool calls, we should process them even without a separator
tool_call_pattern = r"\[tool_call\([^\]]+\)\]"
if re.search(tool_call_pattern, content):
# We have tool calls, so continue processing
content_before_separator = ""
content_after_separator = content
else:
# No tool calls found, return the original content
return content, result_messages, False, content, tool_names

# Take everything before the last separator (including intermediate separators)
content_before_separator = separator_marker.join(content_parts[:-1])
# Take only what comes after the last separator
content_after_separator = content_parts[-1]

# Find tool calls using a more robust method, but only in the content after separator
processed_content = content_before_separator + separator_marker
last_index = 0
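For illustration, a minimal sketch of the conversion performed by the new `_convert_concatenated_json_to_tool_calls` method. It assumes `utils.split_concatenated_json` splits a string of back-to-back top-level JSON objects into individual chunks (that helper is not shown in this diff); the tool parameters below are illustrative:

# A model that ignores the bracketed syntax might emit concatenated JSON:
content = (
    '{"name": "Grep", "arguments": {"pattern": "TODO", "use_regex": true}}'
    '{"name": "View", "arguments": {"file_path": "aider/io.py"}}'
)

# Each chunk parses to a dict with "name" and "arguments". String values are
# quoted with inner quotes escaped, booleans are lowercased, None stays None,
# and other values go through repr(). The content is rewritten as:
# [tool_call(Grep, pattern="TODO", use_regex=true)][tool_call(View, file_path="aider/io.py")]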
4 changes: 2 additions & 2 deletions aider/coders/agent_prompts.py
@@ -25,8 +25,8 @@ class AgentPrompts(CoderPrompts):
<context name="workflow_and_tool_usage">
## Core Workflow
1. **Plan**: Determine the necessary changes. Use the `UpdateTodoList` tool to manage your plan. Always begin with the todo list.
2. **Explore**: Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `Grep`) to find relevant files. These tools add files to context as read-only. Use `Grep` first for broad searches to avoid context clutter.
3. **Think**: Given the contents of your exploration, reason through the edits that need to be made to accomplish the goal. For complex edits, briefly outline your plan for the user.
2. **Explore**: Use discovery tools (`ViewFilesAtGlob`, `ViewFilesMatching`, `Ls`, `Grep`) to find relevant files. These tools add files to context as read-only. Use `Grep` first for broad searches to avoid context clutter. Concisely describe your search strategy with the `Thinking` tool.
3. **Think**: Given the contents of your exploration, concisely reason through the edits that need to be made to accomplish the goal, using the `Thinking` tool. For complex edits, briefly outline your plan for the user.
4. **Execute**: Use the appropriate editing tool. Remember to use `MakeEditable` on a file before modifying it. Break large edits (those greater than 100 lines) into multiple steps
5. **Verify & Recover**: After every edit, check the resulting diff snippet. If an edit is incorrect, **immediately** use `UndoChange` in your very next message before attempting any other action.
6. **Finished**: Use the `Finished` tool when all tasks and changes needed to accomplish the goal are finished
16 changes: 16 additions & 0 deletions aider/commands.py
@@ -998,6 +998,22 @@ async def cmd_add(self, args):
if hasattr(self.coder, "_calculate_context_block_tokens"):
self.coder._calculate_context_block_tokens()

if self.coder.repo_map:
map_tokens = self.coder.repo_map.max_map_tokens
map_mul_no_files = self.coder.repo_map.map_mul_no_files
else:
map_tokens = 0
map_mul_no_files = 1

raise SwitchCoder(
edit_format=self.coder.edit_format,
summarize_from_coder=False,
from_coder=self.coder,
map_tokens=map_tokens,
map_mul_no_files=map_mul_no_files,
show_announcements=False,
)

def completions_drop(self):
files = self.coder.get_inchat_relative_files()
read_only_files = [
52 changes: 47 additions & 5 deletions aider/io.py
@@ -343,6 +343,8 @@ def __init__(
self.bell_on_next_input = False
self.notifications = notifications
self.verbose = verbose
self.profile_start_time = None
self.profile_last_time = None

# Variables used to interface with base_coder
self.coder = None
@@ -417,14 +419,17 @@ def __init__(
self.chat_history_file = None

self.encoding = encoding
valid_line_endings = {"platform", "lf", "crlf"}
valid_line_endings = {"platform", "lf", "crlf", "preserve"}
if line_endings not in valid_line_endings:
raise ValueError(
f"Invalid line_endings value: {line_endings}. "
f"Must be one of: {', '.join(valid_line_endings)}"
)
self.line_endings = line_endings
self.newline = (
None if line_endings == "platform" else "\n" if line_endings == "lf" else "\r\n"
None
if line_endings in ("platform", "preserve")
else "\n" if line_endings == "lf" else "\r\n"
)
self.dry_run = dry_run

@@ -643,6 +648,18 @@ def read_text(self, filename, silent=False):
self.tool_error("Use --encoding to set the unicode encoding.")
return

def _detect_newline(self, filename):
try:
with open(filename, "rb") as f:
chunk = f.read(1024)
if b"\r\n" in chunk:
return "\r\n"
elif b"\n" in chunk:
return "\n"
except (FileNotFoundError, IsADirectoryError):
pass # File doesn't exist or is a directory, will use default
return None

def write_text(self, filename, content, max_retries=5, initial_delay=0.1):
"""
Writes content to a file, retrying with progressive backoff if the file is locked.
@@ -655,10 +672,14 @@ def write_text(self, filename, content, max_retries=5, initial_delay=0.1):
if self.dry_run:
return

newline = self.newline
if self.line_endings == "preserve":
newline = self._detect_newline(filename) or self.newline

delay = initial_delay
for attempt in range(max_retries):
try:
with open(str(filename), "w", encoding=self.encoding, newline=self.newline) as f:
with open(str(filename), "w", encoding=self.encoding, newline=newline) as f:
f.write(content)
return # Successfully wrote the file
except PermissionError as err:
@@ -727,10 +748,10 @@ async def get_input(
show = ""
if rel_fnames:
rel_read_only_fnames = [
get_rel_fname(fname, root) for fname in (abs_read_only_fnames or [])
get_rel_fname(fname, root) for fname in abs_read_only_fnames or []
]
rel_read_only_stubs_fnames = [
get_rel_fname(fname, root) for fname in (abs_read_only_stubs_fnames or [])
get_rel_fname(fname, root) for fname in abs_read_only_stubs_fnames or []
]
show = self.format_files_for_input(
rel_fnames, rel_read_only_fnames, rel_read_only_stubs_fnames
@@ -1354,6 +1375,27 @@ def tool_output(self, *messages, log_only=False, bold=False):

self.stream_print(*messages, style=style)

def profile(self, *messages, start=False):
if not self.verbose:
return

now = time.time()
message_str = " ".join(map(str, messages))

# Treat uninitialized as an implicit start.
if start or self.profile_start_time is None:
self.profile_start_time = now
self.stream_print(f"PROFILE: {message_str}")
else:
total_elapsed = now - self.profile_start_time
last_elapsed = now - self.profile_last_time
output_message = (
f"PROFILE: [+{last_elapsed:6.2f}s] {message_str} (total {total_elapsed:.2f}s)"
)
self.stream_print(output_message)

self.profile_last_time = now

def assistant_output(self, message, pretty=None):
if not message:
self.tool_warning("Empty response received from LLM. Check your provider account?")
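For illustration, a minimal sketch of the new "preserve" line-ending mode, assuming an `InputOutput` constructed with `line_endings="preserve"` (the file name is hypothetical):

from aider.io import InputOutput

io = InputOutput(line_endings="preserve")

# _detect_newline() samples the first 1 KB of the target file; b"\r\n" takes
# precedence over b"\n", so an existing CRLF file is rewritten with CRLF endings:
io.write_text("notes.txt", "updated\ncontent\n")

# If the file does not exist, _detect_newline() returns None and write_text()
# falls back to self.newline, which is None for "preserve" (i.e. the platform's
# default newline translation).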
15 changes: 15 additions & 0 deletions aider/repomap.py
@@ -198,6 +198,7 @@ def __init__(
self._mentioned_ident_similarity = 0.8

if self.verbose:
self.io.tool_output(f"RepoMap loaded entries from tags cache: {len(self.TAGS_CACHE)}")
self.io.tool_output(
f"RepoMap initialized with map_mul_no_files: {self.map_mul_no_files}"
)
@@ -696,6 +697,8 @@ def get_ranked_tags(
if tag.specific_kind == "import":
file_imports[rel_fname].add(tag.name)

self.io.profile("Process Files")

if self.use_enhanced_map and len(file_imports) > 0:
import_ast_mode = True

@@ -791,6 +794,8 @@
weight = num_refs * use_mul * 2 ** (-1 * path_distance)
G.add_edge(referencer, definer, weight=weight, key=ident, ident=ident)

self.io.profile("Build Graph")

if not references:
pass

@@ -808,6 +813,8 @@ def get_ranked_tags(
except ZeroDivisionError:
return []

self.io.profile("PageRank")

# distribute the rank from each source node, across all of its out edges
ranked_definitions = defaultdict(float)
for src in G.nodes:
@@ -822,6 +829,8 @@ def get_ranked_tags(
ident = data["ident"]
ranked_definitions[(dst, ident)] += data["rank"]

self.io.profile("Distribute Rank")

ranked_tags = []
ranked_definitions = sorted(
ranked_definitions.items(), reverse=True, key=lambda x: (x[1], x[0])
@@ -929,6 +938,8 @@ def get_ranked_tags_map_uncached(
mentioned_fnames=None,
mentioned_idents=None,
):
self.io.profile("Start Rank Tags Map Uncached", start=True)

if not other_fnames:
other_fnames = list()
if not max_map_tokens:
@@ -944,6 +955,8 @@ def get_ranked_tags_map_uncached(
chat_fnames, other_fnames, mentioned_fnames, mentioned_idents, True
)

self.io.profile("Finish Getting Ranked Tags")

other_rel_fnames = sorted(set(self.get_rel_fname(fname) for fname in other_fnames))
special_fnames = filter_important_files(other_rel_fnames)
ranked_tags_fnames = set(tag[0] for tag in ranked_tags)
@@ -992,6 +1005,8 @@ def get_ranked_tags_map_uncached(

middle = int((lower_bound + upper_bound) // 2)

self.io.profile("Calculate Best Tree")

return best_tree

tree_cache = dict()
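Together with the new `io.profile()` helper in aider/io.py, these checkpoints time each phase of the repo-map build when verbose output is enabled. A sketch of what the output might look like, following the helper's format string (timings are illustrative, and "Calculate Best Tree" in fact fires once per iteration of the binary search):

PROFILE: Start Rank Tags Map Uncached
PROFILE: [+  1.25s] Process Files (total 1.25s)
PROFILE: [+  0.40s] Build Graph (total 1.65s)
PROFILE: [+  0.30s] PageRank (total 1.95s)
PROFILE: [+  0.05s] Distribute Rank (total 2.00s)
PROFILE: [+  0.02s] Finish Getting Ranked Tags (total 2.02s)
PROFILE: [+  0.51s] Calculate Best Tree (total 2.53s)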
2 changes: 2 additions & 0 deletions aider/tools/__init__.py
@@ -29,6 +29,7 @@
replace_lines,
replace_text,
show_numbered_context,
thinking,
undo_change,
update_todo_list,
view,
@@ -64,6 +65,7 @@
replace_lines,
replace_text,
show_numbered_context,
thinking,
undo_change,
update_todo_list,
view,