Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,20 @@ clean:
find . -type d -name "dist" -exec rm -rf {} +
find . -type d -name "build" -exec rm -rf {} +

# Run the Prefect workflow tests in test mode with debug output.
# NOTE(review): this invokes tests.run_prefect_workflow_tests, not
# src/test_response_agent.py — confirm that module exists and accepts --debug.
test-workflows:
	python -m tests.run_prefect_workflow_tests --debug

# Run the Prefect workflow tests in normal mode (no trigger simulation).
test-workflows-normal:
	python -m tests.run_prefect_workflow_tests --no-test-mode

# Print a summary of every available target.
help:
	@echo "Available targets:"
	@echo "  all          : Install dependencies and aider (default)"
	@echo "  install      : Install dependencies and aider"
	@echo "  install-deps : Install Python dependencies from requirements.txt"
	@echo "  install-aider: Install aider tool"
	@echo "  test         : Run tests with pytest"
	@echo "  test-workflows: Run Prefect workflow tests in test mode"
	@echo "  test-workflows-normal: Run Prefect workflow tests in normal mode"
	@echo "  clean        : Remove Python cache files and build artifacts"
	@echo "  help         : Show this help message"
1 change: 1 addition & 0 deletions requirements-prefect.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
prefect==1.4.1
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,4 @@ beautifulsoup4>=4.9.3
gitpython
pytest
pytest-cov
prefect>=1.0.0,<2.0.0
115 changes: 91 additions & 24 deletions src/response_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@
2. Skip: Skipped because triggers were not met (e.g., no bot tag, already responded)
3. Error: An error occurred during processing (e.g., exception thrown)
"""
from typing import Optional, Tuple, List, Union
from typing import Optional, Tuple, List, Union, Dict, Any

from prefect import task, Flow, case
from prefect.tasks.control_flow import merge
from dotenv import load_dotenv
import string
import triggers
Expand Down Expand Up @@ -345,6 +347,7 @@ def summarize_relevant_comments(
return summarized_comments, comment_list, summary_comment_str


@task(name="Generate Feedback Response")
def generate_feedback_response(
issue: Issue,
repo_name: str,
Expand Down Expand Up @@ -430,6 +433,7 @@ def generate_feedback_response(
return updated_response, all_content


@task(name="Generate New Response")
def generate_new_response(
issue: Issue,
repo_name: str,
Expand Down Expand Up @@ -540,6 +544,7 @@ def generate_new_response(
return response, all_content


@task(name="Generate Edit Command Response")
def generate_edit_command_response(
issue: Issue,
repo_name: str,
Expand Down Expand Up @@ -621,6 +626,7 @@ def generate_edit_command_response(
############################################################


@task(name="Check Triggers")
def check_triggers(issue: Issue) -> str:
"""
Check if the issue contains any triggers for generating a response
Expand All @@ -644,23 +650,25 @@ def check_triggers(issue: Issue) -> str:
return None


def response_selector(trigger: str) -> Callable:
@task(name="Response Selector")
def response_selector(trigger: str) -> Dict[str, Any]:
    """
    Select the appropriate response function based on the trigger

    Args:
        trigger: The trigger phrase for generating the response

    Returns:
        Dictionary with the response function and its name
    """
    # Dispatch table mapping each known trigger phrase to its generator.
    handlers = {
        "feedback": generate_feedback_response,
        "generate_edit_command": generate_edit_command_response,
        "new_response": generate_new_response,
    }
    handler = handlers.get(trigger)
    if handler is None:
        # Unknown (or missing) trigger: tell the caller there is no handler.
        return {"func": None, "name": "none"}
    return {"func": handler, "name": trigger}


def write_pr_comment(
Expand All @@ -687,6 +695,7 @@ def write_pr_comment(
pr_obj.create_issue_comment(write_str)


@task(name="Develop Issue Flow")
def develop_issue_flow(
issue_or_pr: Union[Issue, PullRequest],
repo_name: str,
Expand Down Expand Up @@ -784,6 +793,7 @@ def develop_issue_flow(
return True, None


@task(name="Respond PR Comment Flow")
def respond_pr_comment_flow(
issue_or_pr: Union[Issue, PullRequest],
repo_name: str,
Expand Down Expand Up @@ -907,6 +917,7 @@ def respond_pr_comment_flow(
return False, pr_msg


@task(name="Standalone PR Flow")
def standalone_pr_flow(
issue_or_pr: Union[Issue, PullRequest],
repo_name: str,
Expand Down Expand Up @@ -984,9 +995,11 @@ def standalone_pr_flow(
raise Exception(error_msg)


@task(name="Process Issue")
def process_issue(
issue_or_pr: Union[Issue, PullRequest],
repo_name: str,
test_mode: bool = False,
) -> Tuple[bool, Optional[str]]:
"""
Process a single issue or PR - check if it needs response and generate one
Expand Down Expand Up @@ -1063,7 +1076,27 @@ def process_issue(
else:
# Generate and post response
trigger = check_triggers(issue_or_pr)
response_func = response_selector(trigger)
response_info = response_selector(trigger)
response_func = response_info["func"]

if test_mode:
# In test mode, simulate all triggers
tab_print(
f"TEST MODE: Simulating all triggers for {entity_type} #{issue_or_pr.number}")
test_triggers = ["feedback",
"generate_edit_command", "new_response"]
for test_trigger in test_triggers:
test_response_info = response_selector(test_trigger)
test_func = test_response_info["func"]
if test_func:
tab_print(
f"TEST MODE: Running {test_response_info['name']} flow")
test_response, test_content = test_func(
issue_or_pr, repo_name)
tab_print(
f"TEST MODE: {test_response_info['name']} flow completed")
return True, "Test mode completed all trigger simulations"

if response_func is None:
# This is a skip outcome, not an error
return False, f"No trigger found for {entity_type} #{issue_or_pr.number}"
Expand Down Expand Up @@ -1142,8 +1175,10 @@ def run_aider(message: str, repo_path: str) -> str:
raise RuntimeError(error_msg)


@task(name="Process Repository")
def process_repository(
repo_name: str,
test_mode: bool = False,
) -> None:
"""
Process all open issues and PRs in a repository
Expand Down Expand Up @@ -1187,7 +1222,7 @@ def process_repository(
entity_type = "PR" if is_pull_request(item) else "issue"

# Process the issue/PR and determine the outcome
success, message = process_issue(item, repo_name)
success, message = process_issue(item, repo_name, test_mode)
if success:
# Success outcome
tab_print(
Expand Down Expand Up @@ -1242,23 +1277,55 @@ def initialize_bot() -> None:
print('===============================')


if __name__ == '__main__':
# Initialize the bot (self-update)
@task(name="Get Tracked Repos")
def get_tracked_repos_task() -> List[str]:
    """Prefect task wrapper around get_tracked_repos().

    Returns:
        The list of repository names the bot should process.
    """
    repos = get_tracked_repos()
    return repos


@task(name="Initialize Bot")
def initialize_bot_task() -> None:
    """Prefect task wrapper around initialize_bot()."""
    initialize_bot()

# Get list of repositories to process
tracked_repos = get_tracked_repos()
print(f'Found {len(tracked_repos)} tracked repositories')
pprint(tracked_repos)

# Process each repository
for repo_name in tracked_repos:
print(f'\n=== Processing repository: {repo_name} ===')
try:
process_repository(repo_name)
print(f'Completed processing {repo_name}')
except Exception as e:
tab_print(f'Error processing {repo_name}: {str(e)}')
continue
def create_prefect_flow(test_mode: bool = False) -> Flow:
    """
    Create a Prefect flow for orchestrating the GitHub issue response workflow

    Args:
        test_mode: Whether to run in test mode (simulating all triggers)

    Returns:
        Prefect Flow object
    """
    # Local import: `unmapped` is only needed while building the flow.
    from prefect import unmapped

    with Flow("GitHub Issue Response Flow") as flow:
        # Initialize the bot (self-update) before anything else runs.
        init = initialize_bot_task()

        # BUG FIX: inside the Flow context, get_tracked_repos_task() returns
        # a deferred Prefect Task, not a list, so iterating it with a Python
        # for-loop at build time fails. Use task mapping instead: Prefect
        # resolves the repo list at *run* time and spawns one
        # process_repository run per repository. test_mode is a constant,
        # so it is passed as an unmapped argument.
        repos = get_tracked_repos_task()
        processed = process_repository.map(repos, unmapped(test_mode))

        # Ensure initialization completes before any repository is processed.
        processed.set_upstream(init)

    return flow


if __name__ == '__main__':
    # Parse command line arguments.
    import argparse
    parser = argparse.ArgumentParser(
        description='Run the GitHub issue response agent')
    parser.add_argument('--test', action='store_true',
                        help='Run in test mode (simulate all triggers)')
    args = parser.parse_args()

    # Build the Prefect flow and execute it locally.
    flow = create_prefect_flow(test_mode=args.test)
    flow.run()

    # BUG FIX: this final status message was left dangling at module level
    # after the refactor; keep it inside the __main__ guard so importing
    # this module does not print it.
    print('\nCompleted processing all repositories')
77 changes: 77 additions & 0 deletions src/test_response_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
"""
Test script for response_agent.py using Prefect to orchestrate workflows
"""
from src.response_agent import (
generate_feedback_response,
generate_new_response,
generate_edit_command_response,
develop_issue_flow,
respond_pr_comment_flow,
standalone_pr_flow,
check_triggers,
response_selector,
process_issue,
process_repository,
get_tracked_repos_task,
initialize_bot_task,
create_prefect_flow
)
from prefect.utilities.debug import raise_on_exception
from prefect.engine.state import State
from prefect.engine.results import LocalResult
from prefect import Flow, task, Parameter
import os
import sys
from typing import List, Dict, Any

# Add the parent directory to the path so we can import from src
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


@task
def print_test_header(test_name: str) -> None:
    """Print a banner identifying the test that is about to run."""
    banner = '=' * 80
    print(f"\n{banner}")
    print(f"TESTING: {test_name}")
    print(f"{banner}")


@task
def run_test_with_trigger(repo_name: str, trigger: str) -> None:
    """Run a test with a specific trigger.

    Placeholder implementation: a real test would fetch (or mock) an
    actual issue; for now it only reports what would be processed.
    """
    print(f"Running test with trigger: {trigger}")
    print(f"Would process repository {repo_name} with trigger {trigger}")


def create_test_flow() -> Flow:
    """Create a flow for testing all trigger combinations.

    Returns:
        Prefect Flow with one header/test task pair per (repo, trigger)
        combination.
    """
    # BUG FIX: calling get_tracked_repos_task() inside the Flow context
    # yields a deferred Prefect Task, which cannot be iterated with a
    # Python for-loop at build time. The repo list must be known while
    # the flow is being *built* (one task pair is added per combination),
    # so resolve it eagerly by running the task's underlying function.
    repo_names = get_tracked_repos_task.run()

    # Triggers to exercise.
    trigger_names = ["feedback", "generate_edit_command", "new_response",
                     "develop_issue", "pr_comment", "standalone_pr"]

    with Flow("Test Response Agent Triggers") as flow:
        # Build one header + test task per (repo, trigger) combination.
        for repo_name in repo_names:
            for trigger in trigger_names:
                header = print_test_header(f"{repo_name} - {trigger}")
                test = run_test_with_trigger(repo_name, trigger)
                # Print the header before the test executes.
                test.set_upstream(header)

    return flow


if __name__ == "__main__":
    # Run the trigger-combination test flow; raise_on_exception surfaces
    # task errors immediately instead of capturing them in task states.
    with raise_on_exception():
        test_flow = create_test_flow()
        test_flow.run()

    # Also demonstrate running the main flow in test mode.
    print("\nRunning main flow in test mode...")
    main_flow = create_prefect_flow(test_mode=True)
    main_flow.run()
Loading