diff --git a/Makefile b/Makefile index 78fb602..77df8e9 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,12 @@ clean: find . -type d -name "dist" -exec rm -rf {} + find . -type d -name "build" -exec rm -rf {} + +test-workflows: + python -m tests.run_prefect_workflow_tests --debug + +test-workflows-normal: + python -m tests.run_prefect_workflow_tests --no-test-mode + help: @echo "Available targets:" @echo " all : Install dependencies and aider (default)" @@ -38,5 +44,7 @@ help: @echo " install-deps : Install Python dependencies from requirements.txt" @echo " install-aider: Install aider tool" @echo " test : Run tests with pytest" + @echo " test-workflows: Run Prefect workflow tests in test mode" + @echo " test-workflows-normal: Run Prefect workflow tests in normal mode" @echo " clean : Remove Python cache files and build artifacts" @echo " help : Show this help message" diff --git a/requirements-prefect.txt b/requirements-prefect.txt new file mode 100644 index 0000000..3d4c50f --- /dev/null +++ b/requirements-prefect.txt @@ -0,0 +1 @@ +prefect==0.15.8 diff --git a/requirements.txt b/requirements.txt index 6affd55..bc0d73b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,4 @@ beautifulsoup4>=4.9.3 gitpython pytest pytest-cov +prefect>=1.0.0 diff --git a/src/response_agent.py b/src/response_agent.py index a46c1b6..0c84dda 100644 --- a/src/response_agent.py +++ b/src/response_agent.py @@ -6,8 +6,10 @@ 2. Skip: Skipped because triggers were not met (e.g., no bot tag, already responded) 3. Error: An error occurred during processing (e.g., exception thrown) """ -from typing import Optional, Tuple, List, Union +from typing import Optional, Tuple, List, Union, Dict, Any +from prefect import task, Flow, case +from prefect.tasks.control_flow import merge from dotenv import load_dotenv import string import triggers @@ -345,6 +347,7 @@ def summarize_relevant_comments( return summarized_comments, comment_list, summary_comment_str +@task(name="Generate Feedback Response") def generate_feedback_response( issue: Issue, repo_name: str, @@ -430,6 +433,7 @@ def generate_feedback_response( return updated_response, all_content +@task(name="Generate New Response") def generate_new_response( issue: Issue, repo_name: str, @@ -540,6 +544,7 @@ def generate_new_response( return response, all_content +@task(name="Generate Edit Command Response") def generate_edit_command_response( issue: Issue, repo_name: str, @@ -621,6 +626,7 @@ def generate_edit_command_response( ############################################################ +@task(name="Check Triggers") def check_triggers(issue: Issue) -> str: """ Check if the issue contains any triggers for generating a response @@ -644,23 +650,25 @@ def check_triggers(issue: Issue) -> str: return None -def response_selector(trigger: str) -> Callable: +@task(name="Response Selector") +def response_selector(trigger: str) -> Dict[str, Any]: """ - Generate a response for a GitHub issue using autogen agents + Select the appropriate response function based on the trigger Args: trigger: The trigger phrase for generating the response Returns: + Dictionary with the response function and its name """ if trigger == "feedback": - return generate_feedback_response + return {"func": generate_feedback_response, "name": "feedback"} elif trigger == "generate_edit_command": - return generate_edit_command_response + return {"func": generate_edit_command_response, "name": "generate_edit_command"} elif trigger == "new_response": - return generate_new_response + return {"func": 
generate_new_response, "name": "new_response"} else: - return None + return {"func": None, "name": "none"} def write_pr_comment( @@ -687,6 +695,7 @@ def write_pr_comment( pr_obj.create_issue_comment(write_str) +@task(name="Develop Issue Flow") def develop_issue_flow( issue_or_pr: Union[Issue, PullRequest], repo_name: str, @@ -784,6 +793,7 @@ def develop_issue_flow( return True, None +@task(name="Respond PR Comment Flow") def respond_pr_comment_flow( issue_or_pr: Union[Issue, PullRequest], repo_name: str, @@ -907,6 +917,7 @@ def respond_pr_comment_flow( return False, pr_msg +@task(name="Standalone PR Flow") def standalone_pr_flow( issue_or_pr: Union[Issue, PullRequest], repo_name: str, @@ -984,9 +995,11 @@ def standalone_pr_flow( raise Exception(error_msg) +@task(name="Process Issue") def process_issue( issue_or_pr: Union[Issue, PullRequest], repo_name: str, + test_mode: bool = False, ) -> Tuple[bool, Optional[str]]: """ Process a single issue or PR - check if it needs response and generate one @@ -1063,7 +1076,27 @@ def process_issue( else: # Generate and post response trigger = check_triggers(issue_or_pr) - response_func = response_selector(trigger) + response_info = response_selector(trigger) + response_func = response_info["func"] + + if test_mode: + # In test mode, simulate all triggers + tab_print( + f"TEST MODE: Simulating all triggers for {entity_type} #{issue_or_pr.number}") + test_triggers = ["feedback", + "generate_edit_command", "new_response"] + for test_trigger in test_triggers: + test_response_info = response_selector(test_trigger) + test_func = test_response_info["func"] + if test_func: + tab_print( + f"TEST MODE: Running {test_response_info['name']} flow") + test_response, test_content = test_func( + issue_or_pr, repo_name) + tab_print( + f"TEST MODE: {test_response_info['name']} flow completed") + return True, "Test mode completed all trigger simulations" + if response_func is None: # This is a skip outcome, not an error return False, f"No trigger found for {entity_type} #{issue_or_pr.number}" @@ -1142,8 +1175,10 @@ def run_aider(message: str, repo_path: str) -> str: raise RuntimeError(error_msg) +@task(name="Process Repository") def process_repository( repo_name: str, + test_mode: bool = False, ) -> None: """ Process all open issues and PRs in a repository @@ -1187,7 +1222,7 @@ def process_repository( entity_type = "PR" if is_pull_request(item) else "issue" # Process the issue/PR and determine the outcome - success, message = process_issue(item, repo_name) + success, message = process_issue(item, repo_name, test_mode) if success: # Success outcome tab_print( @@ -1242,23 +1277,55 @@ def initialize_bot() -> None: print('===============================') -if __name__ == '__main__': - # Initialize the bot (self-update) +@task(name="Get Tracked Repos") +def get_tracked_repos_task() -> List[str]: + """Prefect task wrapper for get_tracked_repos""" + return get_tracked_repos() + + +@task(name="Initialize Bot") +def initialize_bot_task() -> None: + """Prefect task wrapper for initialize_bot""" initialize_bot() - # Get list of repositories to process - tracked_repos = get_tracked_repos() - print(f'Found {len(tracked_repos)} tracked repositories') - pprint(tracked_repos) - # Process each repository - for repo_name in tracked_repos: - print(f'\n=== Processing repository: {repo_name} ===') - try: - process_repository(repo_name) - print(f'Completed processing {repo_name}') - except Exception as e: - tab_print(f'Error processing {repo_name}: {str(e)}') - continue +def 
create_prefect_flow(test_mode: bool = False) -> Flow:
+    """
+    Create a Prefect flow for orchestrating the GitHub issue response workflow
+
+    Args:
+        test_mode: Whether to run in test mode (simulating all triggers)
+
+    Returns:
+        Prefect Flow object
+    """
+    from prefect import unmapped  # keeps test_mode from being mapped
+
+    with Flow("GitHub Issue Response Flow") as flow:
+        # Initialize the bot
+        init = initialize_bot_task()
+
+        # Get list of repositories to process
+        repos = get_tracked_repos_task()
+
+        # A Task result cannot be iterated at build time, so map the
+        # repository-processing task over the repos; init runs first
+        process_repository.map(repos, unmapped(test_mode)).set_upstream(init)
+
+    return flow
+
+
+if __name__ == '__main__':
+    # Parse command line arguments
+    import argparse
+    parser = argparse.ArgumentParser(
+        description='Run the GitHub issue response agent')
+    parser.add_argument('--test', action='store_true',
+                        help='Run in test mode (simulate all triggers)')
+    args = parser.parse_args()
+
+    # Create and run the flow
+    flow = create_prefect_flow(test_mode=args.test)
+    flow.run()
 print('\nCompleted processing all repositories')
diff --git a/src/test_response_agent.py b/src/test_response_agent.py
new file mode 100644
index 0000000..5e7ff9e
--- /dev/null
+++ b/src/test_response_agent.py
@@ -0,0 +1,77 @@
+"""
+Test script for response_agent.py using Prefect to orchestrate workflows
+"""
+import os
+import sys
+from typing import List, Dict, Any
+
+# Add the parent directory to the path so we can import from src
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from prefect.utilities.debug import raise_on_exception
+from prefect.engine.state import State
+from prefect.engine.results import LocalResult
+from prefect import Flow, task, Parameter, unmapped
+from src.response_agent import (
+    generate_feedback_response,
+    generate_new_response,
+    generate_edit_command_response,
+    develop_issue_flow,
+    respond_pr_comment_flow,
+    standalone_pr_flow,
+    check_triggers,
+    response_selector,
+    process_issue,
+    process_repository,
+    get_tracked_repos_task,
+    initialize_bot_task,
+    create_prefect_flow
+)
+
+
+@task
+def print_test_header(test_name: str) -> None:
+    """Print a header for the test"""
+    print(f"\n{'='*80}")
+    print(f"TESTING: {test_name}")
+    print(f"{'='*80}")
+
+
+@task
+def run_test_with_trigger(repo_name: str, trigger: str) -> None:
+    """Run a test with a specific trigger"""
+    print(f"Running test with trigger: {trigger}")
+    # This would normally get an actual issue, but for testing we'd mock it
+    # For now, we'll just print what would happen
+    print(f"Would process repository {repo_name} with trigger {trigger}")
+
+
+def create_test_flow() -> Flow:
+    """Create a flow for testing all trigger combinations"""
+    with Flow("Test Response Agent Triggers") as flow:
+        # Get repositories to test with
+        repos = get_tracked_repos_task()
+
+        # Define triggers to test
+        triggers = ["feedback", "generate_edit_command", "new_response",
+                    "develop_issue", "pr_comment", "standalone_pr"]
+
+        # Run tests for every trigger; repos is a Task, so it cannot be
+        # iterated here and the test task is mapped over it instead
+        for trigger in triggers:
+            header = print_test_header(f"All repositories - {trigger}")
+            tests = run_test_with_trigger.map(repos, unmapped(trigger))
+            tests.set_upstream(header)
+
+    return flow
+
+
+if __name__ == "__main__":
+    # Run the test flow
+    with raise_on_exception():
+        flow = create_test_flow()
+        flow.run()
+
+    # Also demonstrate running the main flow in test mode
+    print("\nRunning main flow in test mode...")
+    main_flow = create_prefect_flow(test_mode=True)
+    main_flow.run()
diff --git a/tests/run_prefect_workflow_tests.py b/tests/run_prefect_workflow_tests.py
new file mode 100644
index 0000000..eb97289
--- /dev/null
+++ b/tests/run_prefect_workflow_tests.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+"""
+Script to run Prefect workflow tests for response_agent.py
+
+This script sets up a test environment and runs all the Prefect workflows
+defined in response_agent.py to ensure they work correctly.
+"""
+import argparse
+import os
+import sys
+
+# Add the repository root to the path so we can import from src
+sys.path.insert(0, os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..')))
+
+from prefect.utilities.debug import raise_on_exception
+from src.response_agent import create_prefect_flow
+
+
+def run_workflow_tests(test_mode=True, debug=False):
+    """
+    Run the Prefect workflow and report whether it succeeded
+
+    Args:
+        test_mode: Whether to run in test mode (simulating all triggers)
+        debug: Whether to run in debug mode (raising exceptions)
+    """
+    print("Starting Prefect workflow tests...")
+
+    # Create the flow
+    flow = create_prefect_flow(test_mode=test_mode)
+
+    # Run the flow with exception handling based on debug mode
+    if debug:
+        with raise_on_exception():
+            state = flow.run()
+    else:
+        state = flow.run()
+
+    # Check the result
+    if state.is_successful():
+        print("All workflows completed successfully!")
+        return True
+    else:
+        print("Some workflows failed:")
+        for task_ref, task_state in state.result.items():
+            if not task_state.is_successful():
+                print(f"  - {task_ref.name}: {task_state.message}")
+        return False
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Run Prefect workflow tests for response_agent.py")
+    parser.add_argument("--no-test-mode", action="store_true",
+                        help="Run in normal mode instead of test mode")
+    parser.add_argument("--debug", action="store_true",
+                        help="Run in debug mode (raise exceptions)")
+    args = parser.parse_args()
+
+    success = run_workflow_tests(
+        test_mode=not args.no_test_mode, debug=args.debug)
+    sys.exit(0 if success else 1)
diff --git a/tests/test_response_agent_workflows.py b/tests/test_response_agent_workflows.py
new file mode 100644
index 0000000..9c9e1bd
--- /dev/null
+++ b/tests/test_response_agent_workflows.py
@@ -0,0 +1,349 @@
+"""
+Tests for response_agent.py workflows using Prefect
+
+This test module ensures that all combinations of workflows in response_agent.py
+are properly tested using Prefect for orchestration.
+""" +import src.triggers as triggers +from src.response_agent import ( + check_triggers, + response_selector, + process_issue, + process_repository, + generate_new_response, + generate_feedback_response, + generate_edit_command_response, + develop_issue_flow, + respond_pr_comment_flow, + standalone_pr_flow, + create_prefect_flow +) +import os +import sys +import unittest +from unittest.mock import patch, MagicMock, Mock +import pytest +from prefect import Flow, task +from github.Issue import Issue +from github.PullRequest import PullRequest + +# Add the src directory to the path so we can import modules +sys.path.insert(0, os.path.abspath( + os.path.join(os.path.dirname(__file__), '..'))) + + +class TestResponseAgentWorkflows(unittest.TestCase): + """Test class for response_agent.py workflows""" + + def setUp(self): + """Set up test fixtures""" + # Mock issue + self.mock_issue = Mock(spec=Issue) + self.mock_issue.number = 123 + self.mock_issue.title = "Test Issue" + self.mock_issue.body = "This is a test issue" + self.mock_issue.labels = [] + + # Mock PR + self.mock_pr = Mock(spec=PullRequest) + self.mock_pr.number = 456 + self.mock_pr.title = "Test PR" + self.mock_pr.body = "This is a test PR" + self.mock_pr.labels = [] + + # Mock repo name + self.repo_name = "test-owner/test-repo" + + @patch('src.response_agent.triggers.has_generate_edit_command_trigger') + @patch('src.response_agent.triggers.has_user_feedback') + @patch('src.response_agent.triggers.has_bot_response') + def test_check_triggers(self, mock_has_bot_response, mock_has_user_feedback, + mock_has_generate_edit_command_trigger): + """Test check_triggers function with different trigger combinations""" + # Test generate_edit_command trigger + mock_has_generate_edit_command_trigger.return_value = True + mock_has_user_feedback.return_value = False + mock_has_bot_response.return_value = False + + result = check_triggers(self.mock_issue) + self.assertEqual(result, "generate_edit_command") + + # Test feedback trigger + mock_has_generate_edit_command_trigger.return_value = False + mock_has_user_feedback.return_value = True + mock_has_bot_response.return_value = True + + result = check_triggers(self.mock_issue) + self.assertEqual(result, "feedback") + + # Test new_response trigger + mock_has_generate_edit_command_trigger.return_value = False + mock_has_user_feedback.return_value = False + mock_has_bot_response.return_value = False + + result = check_triggers(self.mock_issue) + self.assertEqual(result, "new_response") + + # Test no trigger + mock_has_generate_edit_command_trigger.return_value = False + mock_has_user_feedback.return_value = False + mock_has_bot_response.return_value = True + + result = check_triggers(self.mock_issue) + self.assertIsNone(result) + + def test_response_selector(self): + """Test response_selector function with different triggers""" + # Test feedback trigger + result = response_selector("feedback") + self.assertEqual(result["name"], "feedback") + self.assertEqual(result["func"], generate_feedback_response) + + # Test generate_edit_command trigger + result = response_selector("generate_edit_command") + self.assertEqual(result["name"], "generate_edit_command") + self.assertEqual(result["func"], generate_edit_command_response) + + # Test new_response trigger + result = response_selector("new_response") + self.assertEqual(result["name"], "new_response") + self.assertEqual(result["func"], generate_new_response) + + # Test none trigger + result = response_selector(None) + self.assertEqual(result["name"], "none") + 
self.assertIsNone(result["func"]) + + @patch('src.response_agent.triggers.has_blech_bot_tag') + @patch('src.response_agent.is_pull_request') + @patch('src.response_agent.write_issue_response') + @patch('src.response_agent.generate_new_response') + def test_process_issue_new_response(self, mock_generate_new_response, mock_write_issue_response, + mock_is_pull_request, mock_has_blech_bot_tag): + """Test process_issue function with new_response trigger""" + # Setup mocks + mock_is_pull_request.return_value = False + mock_has_blech_bot_tag.return_value = True + mock_generate_new_response.return_value = ( + "Test response", ["content"]) + + # Patch all trigger checks to simulate new_response trigger + with patch('src.response_agent.triggers.has_bot_response', return_value=False), \ + patch('src.response_agent.triggers.has_user_feedback', return_value=False), \ + patch('src.response_agent.triggers.has_pr_creation_comment', return_value=(False, None)), \ + patch('src.response_agent.triggers.has_develop_issue_trigger', return_value=False), \ + patch('src.response_agent.triggers.has_error_comment', return_value=False): + + result, message = process_issue(self.mock_issue, self.repo_name) + + # Verify results + self.assertTrue(result) + self.assertIsNone(message) + mock_generate_new_response.assert_called_once_with( + self.mock_issue, self.repo_name) + mock_write_issue_response.assert_called_once() + + @patch('src.response_agent.triggers.has_blech_bot_tag') + @patch('src.response_agent.is_pull_request') + @patch('src.response_agent.write_issue_response') + @patch('src.response_agent.generate_feedback_response') + def test_process_issue_feedback(self, mock_generate_feedback_response, mock_write_issue_response, + mock_is_pull_request, mock_has_blech_bot_tag): + """Test process_issue function with feedback trigger""" + # Setup mocks + mock_is_pull_request.return_value = False + mock_has_blech_bot_tag.return_value = True + mock_generate_feedback_response.return_value = ( + "Test feedback response", ["content"]) + + # Patch all trigger checks to simulate feedback trigger + with patch('src.response_agent.triggers.has_bot_response', return_value=True), \ + patch('src.response_agent.triggers.has_user_feedback', return_value=True), \ + patch('src.response_agent.triggers.has_pr_creation_comment', return_value=(False, None)), \ + patch('src.response_agent.triggers.has_develop_issue_trigger', return_value=False), \ + patch('src.response_agent.triggers.has_error_comment', return_value=False), \ + patch('src.response_agent.triggers.has_generate_edit_command_trigger', return_value=False): + + result, message = process_issue(self.mock_issue, self.repo_name) + + # Verify results + self.assertTrue(result) + self.assertIsNone(message) + mock_generate_feedback_response.assert_called_once_with( + self.mock_issue, self.repo_name) + mock_write_issue_response.assert_called_once() + + @patch('src.response_agent.triggers.has_blech_bot_tag') + @patch('src.response_agent.is_pull_request') + @patch('src.response_agent.write_issue_response') + @patch('src.response_agent.generate_edit_command_response') + def test_process_issue_edit_command(self, mock_generate_edit_command_response, mock_write_issue_response, + mock_is_pull_request, mock_has_blech_bot_tag): + """Test process_issue function with generate_edit_command trigger""" + # Setup mocks + mock_is_pull_request.return_value = False + mock_has_blech_bot_tag.return_value = True + mock_generate_edit_command_response.return_value = ( + "Test edit command", ["content"]) + + # Patch 
all trigger checks to simulate generate_edit_command trigger + with patch('src.response_agent.triggers.has_bot_response', return_value=True), \ + patch('src.response_agent.triggers.has_user_feedback', return_value=False), \ + patch('src.response_agent.triggers.has_pr_creation_comment', return_value=(False, None)), \ + patch('src.response_agent.triggers.has_develop_issue_trigger', return_value=False), \ + patch('src.response_agent.triggers.has_error_comment', return_value=False), \ + patch('src.response_agent.triggers.has_generate_edit_command_trigger', return_value=True): + + result, message = process_issue(self.mock_issue, self.repo_name) + + # Verify results + self.assertTrue(result) + self.assertIsNone(message) + mock_generate_edit_command_response.assert_called_once_with( + self.mock_issue, self.repo_name) + mock_write_issue_response.assert_called_once() + + @patch('src.response_agent.triggers.has_blech_bot_tag') + @patch('src.response_agent.is_pull_request') + @patch('src.response_agent.develop_issue_flow') + def test_process_issue_develop_issue(self, mock_develop_issue_flow, mock_is_pull_request, mock_has_blech_bot_tag): + """Test process_issue function with develop_issue trigger""" + # Setup mocks + mock_is_pull_request.return_value = False + mock_has_blech_bot_tag.return_value = True + mock_develop_issue_flow.return_value = (True, None) + + # Patch all trigger checks to simulate develop_issue trigger + with patch('src.response_agent.triggers.has_bot_response', return_value=True), \ + patch('src.response_agent.triggers.has_user_feedback', return_value=False), \ + patch('src.response_agent.triggers.has_pr_creation_comment', return_value=(False, None)), \ + patch('src.response_agent.triggers.has_develop_issue_trigger', return_value=True), \ + patch('src.response_agent.triggers.has_error_comment', return_value=False): + + result, message = process_issue(self.mock_issue, self.repo_name) + + # Verify results + self.assertTrue(result) + self.assertIsNone(message) + mock_develop_issue_flow.assert_called_once_with( + self.mock_issue, self.repo_name, is_pr=False) + + @patch('src.response_agent.triggers.has_blech_bot_tag') + @patch('src.response_agent.is_pull_request') + @patch('src.response_agent.respond_pr_comment_flow') + def test_process_issue_pr_comment(self, mock_respond_pr_comment_flow, mock_is_pull_request, mock_has_blech_bot_tag): + """Test process_issue function with PR comment trigger""" + # Setup mocks + mock_is_pull_request.return_value = False + mock_has_blech_bot_tag.return_value = True + mock_respond_pr_comment_flow.return_value = (True, None) + + # Patch all trigger checks to simulate PR comment trigger + with patch('src.response_agent.triggers.has_bot_response', return_value=True), \ + patch('src.response_agent.triggers.has_user_feedback', return_value=False), \ + patch('src.response_agent.triggers.has_pr_creation_comment', return_value=(True, "PR comment")), \ + patch('src.response_agent.triggers.has_develop_issue_trigger', return_value=False), \ + patch('src.response_agent.triggers.has_error_comment', return_value=False): + + result, message = process_issue(self.mock_issue, self.repo_name) + + # Verify results + self.assertTrue(result) + self.assertIsNone(message) + mock_respond_pr_comment_flow.assert_called_once_with( + self.mock_issue, self.repo_name, "PR comment") + + @patch('src.response_agent.triggers.has_blech_bot_tag') + @patch('src.response_agent.is_pull_request') + @patch('src.response_agent.standalone_pr_flow') + def test_process_issue_standalone_pr(self, 
mock_standalone_pr_flow, mock_is_pull_request, mock_has_blech_bot_tag): + """Test process_issue function with standalone PR flow""" + # Setup mocks + mock_is_pull_request.return_value = True + mock_has_blech_bot_tag.return_value = True + mock_standalone_pr_flow.return_value = (True, None) + + # Patch all trigger checks + with patch('src.response_agent.triggers.has_error_comment', return_value=False): + + result, message = process_issue(self.mock_pr, self.repo_name) + + # Verify results + self.assertTrue(result) + self.assertIsNone(message) + mock_standalone_pr_flow.assert_called_once_with( + self.mock_pr, self.repo_name) + + @patch('src.response_agent.get_github_client') + @patch('src.response_agent.get_repository') + @patch('src.response_agent.bot_tools.get_local_repo_path') + @patch('src.response_agent.clone_repository') + @patch('src.response_agent.update_repository') + @patch('src.response_agent.checkout_branch') + @patch('src.response_agent.process_issue') + def test_process_repository(self, mock_process_issue, mock_checkout_branch, mock_update_repository, + mock_clone_repository, mock_get_local_repo_path, mock_get_repository, + mock_get_github_client): + """Test process_repository function""" + # Setup mocks + mock_repo = MagicMock() + mock_repo.default_branch = "main" + mock_repo.get_issues.return_value = [self.mock_issue, self.mock_pr] + + mock_get_github_client.return_value = MagicMock() + mock_get_repository.return_value = mock_repo + mock_get_local_repo_path.return_value = "/path/to/repo" + mock_process_issue.return_value = (True, None) + + # Test with existing repo + with patch('os.path.exists', return_value=True): + process_repository(self.repo_name) + + # Verify results + mock_get_github_client.assert_called_once() + mock_get_repository.assert_called_once() + mock_get_local_repo_path.assert_called_once_with(self.repo_name) + mock_clone_repository.assert_not_called() + mock_update_repository.assert_called_once() + mock_checkout_branch.assert_called_once() + self.assertEqual(mock_process_issue.call_count, 2) + + # Reset mocks + mock_get_github_client.reset_mock() + mock_get_repository.reset_mock() + mock_get_local_repo_path.reset_mock() + mock_clone_repository.reset_mock() + mock_update_repository.reset_mock() + mock_checkout_branch.reset_mock() + mock_process_issue.reset_mock() + + # Test with non-existing repo + with patch('os.path.exists', return_value=False): + mock_clone_repository.return_value = "/path/to/cloned/repo" + + process_repository(self.repo_name) + + # Verify results + mock_get_github_client.assert_called_once() + mock_get_repository.assert_called_once() + mock_get_local_repo_path.assert_called_once_with(self.repo_name) + mock_clone_repository.assert_called_once() + mock_update_repository.assert_called_once() + mock_checkout_branch.assert_called_once() + self.assertEqual(mock_process_issue.call_count, 2) + + def test_create_prefect_flow(self): + """Test create_prefect_flow function""" + # Test normal mode + flow = create_prefect_flow(test_mode=False) + self.assertIsInstance(flow, Flow) + self.assertEqual(flow.name, "GitHub Issue Response Flow") + + # Test test mode + flow = create_prefect_flow(test_mode=True) + self.assertIsInstance(flow, Flow) + self.assertEqual(flow.name, "GitHub Issue Response Flow") + + +if __name__ == '__main__': + unittest.main()