diff --git a/README.md b/README.md
index baddaba..b59458d 100644
--- a/README.md
+++ b/README.md
@@ -156,6 +156,7 @@ This action can be configured to authenticate with GitHub App Installation or Pe
 | `HIDE_TIME_TO_FIRST_RESPONSE` | False | False | If set to `true`, the time to first response will not be displayed in the generated Markdown file. |
 | `HIDE_STATUS` | False | True | If set to `true`, the status column will not be shown |
 | `HIDE_CREATED_AT` | False | True | If set to `true`, the creation timestamp will not be displayed in the generated Markdown file. |
+| `HIDE_PR_STATISTICS` | False | True | If set to `true`, PR comment statistics (mean, median, 90th percentile, and individual PR comment counts) will not be displayed in the generated Markdown file. |
 | `DRAFT_PR_TRACKING` | False | False | If set to `true`, draft PRs will be included in the metrics as a new column and in the summary stats. |
 | `IGNORE_USERS` | False | False | A comma separated list of users to ignore when calculating metrics. (ie. `IGNORE_USERS: 'user1,user2'`). To ignore bots, append `[bot]` to the user (ie. `IGNORE_USERS: 'github-actions[bot]'`) Users in this list will also have their authored issues and pull requests removed from the Markdown table. |
 | `ENABLE_MENTOR_COUNT` | False | False | If set to 'TRUE' count number of comments users left on discussions, issues and PRs and display number of active mentors |
""" # pylint: disable=too-many-instance-attributes @@ -44,6 +45,7 @@ def __init__( assignee=None, assignees=None, status=None, + pr_comment_count=None, ): self.title = title self.html_url = html_url @@ -58,3 +60,4 @@ def __init__( self.mentor_activity = mentor_activity self.created_at = created_at self.status = status + self.pr_comment_count = pr_comment_count diff --git a/config.py b/config.py index 475aa34..a41b0b9 100644 --- a/config.py +++ b/config.py @@ -57,6 +57,7 @@ class EnvVars: rate_limit_bypass (bool): If set to TRUE, bypass the rate limit for the GitHub API draft_pr_tracking (bool): If set to TRUE, track PR time in draft state in addition to other metrics + hide_pr_statistics (bool): If set to TRUE, hide PR comment statistics in the output """ def __init__( @@ -88,6 +89,7 @@ def __init__( output_file: str, rate_limit_bypass: bool = False, draft_pr_tracking: bool = False, + hide_pr_statistics: bool = True, ): self.gh_app_id = gh_app_id self.gh_app_installation_id = gh_app_installation_id @@ -116,6 +118,7 @@ def __init__( self.output_file = output_file self.rate_limit_bypass = rate_limit_bypass self.draft_pr_tracking = draft_pr_tracking + self.hide_pr_statistics = hide_pr_statistics def __repr__(self): return ( @@ -147,6 +150,7 @@ def __repr__(self): f"{self.output_file}" f"{self.rate_limit_bypass}" f"{self.draft_pr_tracking}" + f"{self.hide_pr_statistics}" ) @@ -244,6 +248,7 @@ def get_env_vars(test: bool = False) -> EnvVars: hide_time_to_first_response = get_bool_env_var("HIDE_TIME_TO_FIRST_RESPONSE", False) hide_created_at = get_bool_env_var("HIDE_CREATED_AT", True) hide_status = get_bool_env_var("HIDE_STATUS", True) + hide_pr_statistics = get_bool_env_var("HIDE_PR_STATISTICS", True) enable_mentor_count = get_bool_env_var("ENABLE_MENTOR_COUNT", False) min_mentor_comments = os.getenv("MIN_MENTOR_COMMENTS", "10") max_comments_eval = os.getenv("MAX_COMMENTS_EVAL", "20") @@ -278,4 +283,5 @@ def get_env_vars(test: bool = False) -> EnvVars: output_file, rate_limit_bypass, draft_pr_tracking, + hide_pr_statistics, ) diff --git a/issue_metrics.py b/issue_metrics.py index a7dd504..2e2b9d0 100644 --- a/issue_metrics.py +++ b/issue_metrics.py @@ -30,6 +30,7 @@ from markdown_helpers import markdown_too_large_for_issue_body, split_markdown_file from markdown_writer import write_to_markdown from most_active_mentors import count_comments_per_user, get_mentor_count +from pr_comments import count_pr_comments, get_stats_pr_comments from search import get_owners_and_repositories, search_issues from time_in_draft import get_stats_time_in_draft, measure_time_in_draft from time_to_answer import get_stats_time_to_answer, measure_time_to_answer @@ -153,6 +154,12 @@ def get_per_issue_metrics( f"An error occurred processing review comments. Perhaps the review contains a ghost user. 
{e}" ) + # Count PR comments if this is a pull request and statistics are not hidden + if pull_request and not env_vars.hide_pr_statistics: + issue_with_metrics.pr_comment_count = count_pr_comments( + issue, pull_request, ignore_users + ) + if env_vars.hide_time_to_first_response is False: issue_with_metrics.time_to_first_response = ( measure_time_to_first_response( @@ -302,6 +309,7 @@ def main(): # pragma: no cover average_time_to_answer=None, average_time_in_draft=None, average_time_in_labels=None, + stats_pr_comments=None, num_issues_opened=None, num_issues_closed=None, num_mentor_count=None, @@ -329,6 +337,7 @@ def main(): # pragma: no cover average_time_to_answer=None, average_time_in_draft=None, average_time_in_labels=None, + stats_pr_comments=None, num_issues_opened=None, num_issues_closed=None, num_mentor_count=None, @@ -362,6 +371,7 @@ def main(): # pragma: no cover stats_time_to_answer = get_stats_time_to_answer(issues_with_metrics) stats_time_in_draft = get_stats_time_in_draft(issues_with_metrics) + stats_pr_comments = get_stats_pr_comments(issues_with_metrics) num_mentor_count = 0 if enable_mentor_count: @@ -379,6 +389,7 @@ def main(): # pragma: no cover stats_time_to_answer=stats_time_to_answer, stats_time_in_draft=stats_time_in_draft, stats_time_in_labels=stats_time_in_labels, + stats_pr_comments=stats_pr_comments, num_issues_opened=num_issues_open, num_issues_closed=num_issues_closed, num_mentor_count=num_mentor_count, @@ -393,6 +404,7 @@ def main(): # pragma: no cover average_time_to_answer=stats_time_to_answer, average_time_in_draft=stats_time_in_draft, average_time_in_labels=stats_time_in_labels, + stats_pr_comments=stats_pr_comments, num_issues_opened=num_issues_open, num_issues_closed=num_issues_closed, num_mentor_count=num_mentor_count, diff --git a/json_writer.py b/json_writer.py index 1128560..5dcd288 100644 --- a/json_writer.py +++ b/json_writer.py @@ -21,7 +21,7 @@ import json import os from datetime import timedelta -from typing import Any, List, Union +from typing import Any, Dict, List, Union from classes import IssueWithMetrics @@ -33,6 +33,7 @@ def write_to_json( stats_time_to_answer: Union[dict[str, timedelta], None], stats_time_in_draft: Union[dict[str, timedelta], None], stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None], + stats_pr_comments: Union[Dict[str, float], None], num_issues_opened: Union[int, None], num_issues_closed: Union[int, None], num_mentor_count: Union[int, None], @@ -142,6 +143,15 @@ def write_to_json( for label, time in stats_time_in_labels["90p"].items(): p90_time_in_labels[label] = str(time) + # PR comments statistics + average_pr_comments = None + med_pr_comments = None + p90_pr_comments = None + if stats_pr_comments is not None: + average_pr_comments = stats_pr_comments["avg"] + med_pr_comments = stats_pr_comments["med"] + p90_pr_comments = stats_pr_comments["90p"] + # Create a dictionary with the metrics metrics: dict[str, Any] = { "average_time_to_first_response": str(average_time_to_first_response), @@ -159,6 +169,9 @@ def write_to_json( "90_percentile_time_to_answer": str(p90_time_to_answer), "90_percentile_time_in_draft": str(p90_time_in_draft), "90_percentile_time_in_labels": p90_time_in_labels, + "average_pr_comments": average_pr_comments, + "median_pr_comments": med_pr_comments, + "90_percentile_pr_comments": p90_pr_comments, "num_items_opened": num_issues_opened, "num_items_closed": num_issues_closed, "num_mentor_count": num_mentor_count, @@ -184,6 +197,7 @@ def write_to_json( "time_to_answer": 
diff --git a/markdown_writer.py b/markdown_writer.py
index 67cc241..207118d 100644
--- a/markdown_writer.py
+++ b/markdown_writer.py
@@ -91,6 +91,10 @@ def get_non_hidden_columns(labels) -> List[str]:
     if not hide_status:
         columns.append("Status")
 
+    hide_pr_statistics = env_vars.hide_pr_statistics
+    if not hide_pr_statistics:
+        columns.append("PR Comments")
+
     return columns
 
 
@@ -101,6 +105,7 @@ def write_to_markdown(
     average_time_to_answer: Union[dict[str, timedelta], None],
     average_time_in_draft: Union[dict[str, timedelta], None],
     average_time_in_labels: Union[dict, None],
+    stats_pr_comments: Union[dict[str, float], None],
     num_issues_opened: Union[int, None],
     num_issues_closed: Union[int, None],
     num_mentor_count: Union[int, None],
@@ -146,6 +151,7 @@
     """
     columns = get_non_hidden_columns(labels)
+    env_vars = get_env_vars()
     output_file_name = output_file if output_file else "issue_metrics.md"
     with open(output_file_name, "w", encoding="utf-8") as file:
         file.write(f"# {report_title}\n\n")
@@ -169,6 +175,7 @@
             average_time_to_answer,
             average_time_in_draft,
             average_time_in_labels,
+            stats_pr_comments,
             num_issues_opened,
             num_issues_closed,
             num_mentor_count,
@@ -178,6 +185,7 @@
             hide_label_metrics,
             hide_items_closed_count,
             enable_mentor_count,
+            env_vars.hide_pr_statistics,
         )
 
     # Write second table with individual issue/pr/discussion metrics
@@ -238,6 +246,8 @@
                 file.write(f" {issue.created_at} |")
             if "Status" in columns:
                 file.write(f" {issue.status} |")
+            if "PR Comments" in columns:
+                file.write(f" {issue.pr_comment_count or 'N/A'} |")
             file.write("\n")
         file.write(
             "\n_This report was generated with the \
@@ -256,6 +266,7 @@ def write_overall_metrics_tables(
     stats_time_to_answer,
     average_time_in_draft,
     stats_time_in_labels,
+    stats_pr_comments,
     num_issues_opened,
     num_issues_closed,
     num_mentor_count,
@@ -265,17 +276,23 @@
     hide_label_metrics,
     hide_items_closed_count=False,
     enable_mentor_count=False,
+    hide_pr_statistics=True,
 ):
     """Write the overall metrics tables to the markdown file."""
-    if any(
-        column in columns
-        for column in [
-            "Time to first response",
-            "Time to close",
-            "Time to answer",
-            "Time in draft",
-        ]
-    ) or (hide_label_metrics is False and len(labels) > 0):
+
+    if (
+        any(
+            column in columns
+            for column in [
+                "Time to first response",
+                "Time to close",
+                "Time to answer",
+                "Time in draft",
+            ]
+        )
+        or (hide_label_metrics is False and len(labels) > 0)
+        or (not hide_pr_statistics and stats_pr_comments is not None)
+    ):
         file.write("| Metric | Average | Median | 90th percentile |\n")
         file.write("| --- | --- | --- | ---: |\n")
         if "Time to first response" in columns:
@@ -330,6 +347,16 @@
                     f"| {stats_time_in_labels['med'][label]} "
                     f"| {stats_time_in_labels['90p'][label]} |\n"
                 )
+
+        # Add PR comment statistics if not hidden
+        if not hide_pr_statistics and stats_pr_comments is not None:
+            file.write(
+                f"| Number of comments per PR "
+                f"| {stats_pr_comments['avg']} "
+                f"| {stats_pr_comments['med']} "
+                f"| {stats_pr_comments['90p']} |\n"
+            )
+
     if "Status" in columns:
         # Add logic for the 'status' column
         file.write("| Status | | | |\n")
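Rendered, the new summary row uses the same cell layout as the existing metric rows. A quick sketch of the output with hypothetical stats (keys as returned by `get_stats_pr_comments`):

```python
stats_pr_comments = {"avg": 6.0, "med": 5.0, "90p": 9.0}

row = (
    f"| Number of comments per PR "
    f"| {stats_pr_comments['avg']} "
    f"| {stats_pr_comments['med']} "
    f"| {stats_pr_comments['90p']} |"
)
print(row)
# | Number of comments per PR | 6.0 | 5.0 | 9.0 |
```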
diff --git a/pr_comments.py b/pr_comments.py
new file mode 100644
index 0000000..959f713
--- /dev/null
+++ b/pr_comments.py
@@ -0,0 +1,115 @@
+"""A module for measuring the number of comments on pull requests.
+
+This module provides functions for counting comments on GitHub pull requests,
+excluding bot comments, and calculating statistics about comment counts.
+
+Functions:
+    count_pr_comments(
+        issue: Union[github3.issues.Issue, None],
+        pull_request: Union[github3.pulls.PullRequest, None],
+        ignore_users: Union[List[str], None] = None,
+    ) -> Union[int, None]:
+        Count the number of comments on a pull request, excluding bot comments.
+    get_stats_pr_comments(
+        issues_with_metrics: List[IssueWithMetrics],
+    ) -> Union[dict[str, float], None]:
+        Calculate stats describing the comment count for a list of pull requests.
+"""
+
+from typing import List, Union
+
+import github3
+import numpy
+from classes import IssueWithMetrics
+
+
+def count_pr_comments(
+    issue: Union[github3.issues.Issue, None],  # type: ignore
+    pull_request: Union[github3.pulls.PullRequest, None] = None,
+    ignore_users: Union[List[str], None] = None,
+) -> Union[int, None]:
+    """Count the number of comments on a pull request, excluding bot comments.
+
+    Args:
+        issue (Union[github3.issues.Issue, None]): A GitHub issue.
+        pull_request (Union[github3.pulls.PullRequest, None]): A GitHub pull request.
+        ignore_users (Union[List[str], None]): A list of GitHub usernames to ignore.
+
+    Returns:
+        Union[int, None]: The number of comments on the pull request, excluding bots.
+            Returns None if not a pull request.
+    """
+    if not pull_request or not issue:
+        return None
+
+    if ignore_users is None:
+        ignore_users = []
+
+    comment_count = 0
+
+    # Count issue comments
+    try:
+        comments = issue.issue.comments()  # type: ignore
+        for comment in comments:
+            # Skip bot comments and ignored users
+            if (
+                str(comment.user.type.lower()) != "bot"
+                and comment.user.login not in ignore_users
+            ):
+                comment_count += 1
+    except (AttributeError, TypeError):
+        # If we can't get comments, just continue
+        pass
+
+    # Count pull request review comments
+    try:
+        review_comments = pull_request.review_comments()
+        for comment in review_comments:
+            # Skip bot comments and ignored users
+            if (
+                str(comment.user.type.lower()) != "bot"
+                and comment.user.login not in ignore_users
+            ):
+                comment_count += 1
+    except (AttributeError, TypeError):
+        # If we can't get review comments, just continue
+        pass
+
+    return comment_count
+
+
+def get_stats_pr_comments(
+    issues_with_metrics: List[IssueWithMetrics],
+) -> Union[dict[str, float], None]:
+    """Calculate stats describing the comment count for a list of pull requests.
+
+    Args:
+        issues_with_metrics (List[IssueWithMetrics]): A list of GitHub issues with metrics attached.
+
+    Returns:
+        Union[Dict[str, float], None]: The stats describing comment counts for PRs.
+    """
+    # Filter out issues that are not pull requests or have no comment count
+    prs_with_comment_counts = [
+        issue.pr_comment_count
+        for issue in issues_with_metrics
+        if issue.pr_comment_count is not None
+    ]
+
+    if not prs_with_comment_counts:
+        return None
+
+    # Calculate statistics
+    average_comment_count = numpy.round(numpy.average(prs_with_comment_counts), 1)
+    median_comment_count = numpy.round(numpy.median(prs_with_comment_counts), 1)
+    ninety_percentile_comment_count = numpy.round(
+        numpy.percentile(prs_with_comment_counts, 90), 1
+    )
+
+    stats = {
+        "avg": average_comment_count,
+        "med": median_comment_count,
+        "90p": ninety_percentile_comment_count,
+    }
+
+    return stats
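The new module only touches a handful of attributes on the github3 objects (`issue.issue.comments()`, `pull_request.review_comments()`, and each comment's `user.type`/`user.login`), so its behaviour can be exercised with plain stand-in objects. A self-contained sketch — `FakeUser`, `FakeComment`, `FakeIssue`, and `FakePullRequest` are illustrative and not part of the patch:

```python
from dataclasses import dataclass
from typing import List

from pr_comments import count_pr_comments  # the module added above


@dataclass
class FakeUser:
    type: str
    login: str


@dataclass
class FakeComment:
    user: FakeUser


class FakeIssue:
    """Stand-in for the search-result wrapper; count_pr_comments reads issue.issue.comments()."""

    def __init__(self, comments: List[FakeComment]):
        self.issue = self  # the wrapper exposes the underlying issue as .issue
        self._comments = comments

    def comments(self):
        return self._comments


class FakePullRequest:
    def __init__(self, review_comments: List[FakeComment]):
        self._review_comments = review_comments

    def review_comments(self):
        return self._review_comments


issue = FakeIssue(
    [
        FakeComment(FakeUser("User", "alice")),
        FakeComment(FakeUser("Bot", "github-actions[bot]")),  # skipped: bot
    ]
)
pull_request = FakePullRequest([FakeComment(FakeUser("User", "bob"))])

# alice is ignored explicitly, the bot is filtered by type, so only bob counts.
print(count_pr_comments(issue, pull_request, ignore_users=["alice"]))  # 1
```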
+ """ + # Filter out issues that are not pull requests or have no comment count + prs_with_comment_counts = [ + issue.pr_comment_count + for issue in issues_with_metrics + if issue.pr_comment_count is not None + ] + + if not prs_with_comment_counts: + return None + + # Calculate statistics + average_comment_count = numpy.round(numpy.average(prs_with_comment_counts), 1) + median_comment_count = numpy.round(numpy.median(prs_with_comment_counts), 1) + ninety_percentile_comment_count = numpy.round( + numpy.percentile(prs_with_comment_counts, 90), 1 + ) + + stats = { + "avg": average_comment_count, + "med": median_comment_count, + "90p": ninety_percentile_comment_count, + } + + return stats diff --git a/test_assignee_integration.py b/test_assignee_integration.py index 3495b77..1af28e6 100644 --- a/test_assignee_integration.py +++ b/test_assignee_integration.py @@ -67,6 +67,7 @@ def test_assignee_in_markdown_output(self): average_time_to_answer=None, average_time_in_draft=None, average_time_in_labels=None, + stats_pr_comments=None, num_issues_opened=2, num_issues_closed=1, num_mentor_count=0, @@ -144,6 +145,7 @@ def test_assignee_in_json_output(self): stats_time_to_answer=None, stats_time_in_draft=None, stats_time_in_labels=None, + stats_pr_comments=None, num_issues_opened=2, num_issues_closed=1, num_mentor_count=0, diff --git a/test_column_order_fix.py b/test_column_order_fix.py index c186ad7..45fcfc6 100644 --- a/test_column_order_fix.py +++ b/test_column_order_fix.py @@ -59,6 +59,7 @@ def test_status_and_created_at_columns_alignment(self): average_time_to_answer=None, average_time_in_draft=None, average_time_in_labels=None, + stats_pr_comments=None, num_issues_opened=1, num_issues_closed=0, num_mentor_count=0, diff --git a/test_config.py b/test_config.py index 49435fa..724e986 100644 --- a/test_config.py +++ b/test_config.py @@ -78,6 +78,7 @@ def setUp(self): "HIDE_TIME_TO_ANSWER", "HIDE_TIME_TO_CLOSE", "HIDE_TIME_TO_FIRST_RESPONSE", + "HIDE_PR_STATISTICS", "IGNORE_USERS", "LABELS_TO_MEASURE", "NON_MENTIONING_LINKS", @@ -291,6 +292,7 @@ def test_get_env_vars_optional_values(self): output_file="issue_metrics.md", rate_limit_bypass=True, draft_pr_tracking=True, + hide_pr_statistics=True, ) result = get_env_vars(True) self.assertEqual(str(result), str(expected_result)) @@ -336,6 +338,7 @@ def test_get_env_vars_optionals_are_defaulted(self): output_file="", rate_limit_bypass=False, draft_pr_tracking=False, + hide_pr_statistics=True, ) result = get_env_vars(True) self.assertEqual(str(result), str(expected_result)) diff --git a/test_json_writer.py b/test_json_writer.py index 3ace419..3a6a24f 100644 --- a/test_json_writer.py +++ b/test_json_writer.py @@ -91,6 +91,9 @@ def test_write_to_json(self): "90_percentile_time_to_answer": "3 days, 0:00:00", "90_percentile_time_in_draft": "1 day, 0:00:00", "90_percentile_time_in_labels": {"bug": "1 day, 16:24:12"}, + "average_pr_comments": None, + "median_pr_comments": None, + "90_percentile_pr_comments": None, "num_items_opened": 2, "num_items_closed": 1, "num_mentor_count": 5, @@ -107,6 +110,7 @@ def test_write_to_json(self): "time_to_answer": "None", "time_in_draft": "1 day, 0:00:00", "label_metrics": {"bug": "1 day, 16:24:12"}, + "pr_comment_count": None, "created_at": "-5 days, 0:00:00", }, { @@ -120,6 +124,7 @@ def test_write_to_json(self): "time_to_answer": "1 day, 0:00:00", "time_in_draft": "None", "label_metrics": {}, + "pr_comment_count": None, "created_at": "-5 days, 0:00:00", }, ], @@ -135,6 +140,7 @@ def test_write_to_json(self): 
             stats_time_to_answer=stats_time_to_answer,
             stats_time_in_draft=stats_time_in_draft,
             stats_time_in_labels=stats_time_in_labels,
+            stats_pr_comments=None,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             num_mentor_count=num_mentor_count,
@@ -202,6 +208,9 @@ def test_write_to_json_with_no_response(self):
             "90_percentile_time_to_answer": "None",
             "90_percentile_time_in_draft": "None",
             "90_percentile_time_in_labels": {},
+            "average_pr_comments": None,
+            "median_pr_comments": None,
+            "90_percentile_pr_comments": None,
             "num_items_opened": 2,
             "num_items_closed": 0,
             "num_mentor_count": 5,
@@ -218,6 +227,7 @@
                     "time_to_answer": "None",
                     "time_in_draft": "None",
                     "label_metrics": {},
+                    "pr_comment_count": None,
                     "created_at": "None",
                 },
                 {
@@ -231,6 +241,7 @@
                     "time_to_answer": "None",
                     "time_in_draft": "None",
                     "label_metrics": {},
+                    "pr_comment_count": None,
                     "created_at": "None",
                 },
             ],
@@ -246,6 +257,7 @@
             stats_time_to_answer=stats_time_to_answer,
             stats_time_in_draft=stats_time_in_draft,
             stats_time_in_labels=stats_time_in_labels,
+            stats_pr_comments=None,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             num_mentor_count=num_mentor_count,
diff --git a/test_markdown_writer.py b/test_markdown_writer.py
index c79536b..cabd2d2 100644
--- a/test_markdown_writer.py
+++ b/test_markdown_writer.py
@@ -107,6 +107,7 @@ def test_write_to_markdown(self):
             average_time_to_answer=time_to_answer,
             average_time_in_draft=time_in_draft,
             average_time_in_labels=time_in_labels,
+            stats_pr_comments=None,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             num_mentor_count=num_mentor_count,
@@ -226,6 +227,7 @@ def test_write_to_markdown_with_vertical_bar_in_title(self):
             average_time_to_answer=average_time_to_answer,
             average_time_in_draft=average_time_in_draft,
             average_time_in_labels=average_time_in_labels,
+            stats_pr_comments=None,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             num_mentor_count=num_mentor_count,
@@ -281,6 +283,7 @@ def test_write_to_markdown_no_issues(self):
             None,
             None,
             None,
+            None,
             report_title="Issue Metrics",
         )
 
@@ -378,8 +381,9 @@ def test_writes_markdown_file_with_non_hidden_columns_only(self):
             average_time_to_first_response=average_time_to_first_response,
             average_time_to_close=average_time_to_close,
             average_time_to_answer=average_time_to_answer,
-            average_time_in_labels=average_time_in_labels,
             average_time_in_draft=average_time_in_draft,
+            average_time_in_labels=average_time_in_labels,
+            stats_pr_comments=None,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             num_mentor_count=num_mentor_count,
@@ -409,7 +413,8 @@
             "| --- | --- | --- | --- | --- | --- |\n"
             "| Issue 1 | https://www.ghe.com/user/repo/issues/1 | [charlie](https://ghe.com/charlie) | "
             "[alice](https://ghe.com/alice) | -5 days, 0:00:00 | None |\n"
-            "| Issue 2 | https://www.ghe.com/user/repo/issues/2 | None | [bob](https://ghe.com/bob) | -5 days, 0:00:00 | None |\n\n"
+            "| Issue 2 | https://www.ghe.com/user/repo/issues/2 | None | "
+            "[bob](https://ghe.com/bob) | -5 days, 0:00:00 | None |\n\n"
             "_This report was generated with the [Issue Metrics Action](https://github.com/github/issue-metrics)_\n"
             "Search query used to find these items: `repo:user/repo is:issue`\n"
         )
@@ -487,8 +492,9 @@ def test_writes_markdown_file_with_hidden_status_column(self):
             average_time_to_first_response=average_time_to_first_response,
             average_time_to_close=average_time_to_close,
             average_time_to_answer=average_time_to_answer,
-            average_time_in_labels=average_time_in_labels,
             average_time_in_draft=average_time_in_draft,
+            average_time_in_labels=average_time_in_labels,
+            stats_pr_comments=None,
             num_issues_opened=num_issues_opened,
             num_issues_closed=num_issues_closed,
             num_mentor_count=num_mentor_count,
diff --git a/test_pr_comments.py b/test_pr_comments.py
new file mode 100644
index 0000000..2ab0595
--- /dev/null
+++ b/test_pr_comments.py
@@ -0,0 +1,138 @@
+"""Tests for the pr_comments module.
+
+This module contains tests for the count_pr_comments and get_stats_pr_comments
+functions.
+"""
+
+import unittest
+from unittest.mock import MagicMock
+
+from classes import IssueWithMetrics
+from pr_comments import count_pr_comments, get_stats_pr_comments
+
+
+class TestCountPRComments(unittest.TestCase):
+    """Test the count_pr_comments function."""
+
+    def test_count_pr_comments_with_comments(self):
+        """Test counting PR comments with actual comments."""
+        # Mock issue with comments
+        mock_issue = MagicMock()
+        mock_comment1 = MagicMock()
+        mock_comment1.user.type = "User"
+        mock_comment1.user.login = "user1"
+        mock_comment2 = MagicMock()
+        mock_comment2.user.type = "User"
+        mock_comment2.user.login = "user2"
+        mock_issue.issue.comments.return_value = [mock_comment1, mock_comment2]
+
+        # Mock pull request with review comments
+        mock_pull_request = MagicMock()
+        mock_review_comment1 = MagicMock()
+        mock_review_comment1.user.type = "User"
+        mock_review_comment1.user.login = "user3"
+        mock_pull_request.review_comments.return_value = [mock_review_comment1]
+
+        result = count_pr_comments(mock_issue, mock_pull_request, [])
+        self.assertEqual(result, 3)
+
+    def test_count_pr_comments_with_bots_ignored(self):
+        """Test that bot comments are ignored."""
+        # Mock issue with bot comment
+        mock_issue = MagicMock()
+        mock_bot_comment = MagicMock()
+        mock_bot_comment.user.type = "Bot"
+        mock_bot_comment.user.login = "github-actions[bot]"
+        mock_user_comment = MagicMock()
+        mock_user_comment.user.type = "User"
+        mock_user_comment.user.login = "user1"
+        mock_issue.issue.comments.return_value = [mock_bot_comment, mock_user_comment]
+
+        mock_pull_request = MagicMock()
+        mock_pull_request.review_comments.return_value = []
+
+        result = count_pr_comments(mock_issue, mock_pull_request, [])
+        self.assertEqual(result, 1)
+
+    def test_count_pr_comments_with_ignored_users(self):
+        """Test that ignored users are not counted."""
+        # Mock issue with comments from ignored user
+        mock_issue = MagicMock()
+        mock_comment1 = MagicMock()
+        mock_comment1.user.type = "User"
+        mock_comment1.user.login = "ignored_user"
+        mock_comment2 = MagicMock()
+        mock_comment2.user.type = "User"
+        mock_comment2.user.login = "regular_user"
+        mock_issue.issue.comments.return_value = [mock_comment1, mock_comment2]
+
+        mock_pull_request = MagicMock()
+        mock_pull_request.review_comments.return_value = []
+
+        result = count_pr_comments(mock_issue, mock_pull_request, ["ignored_user"])
+        self.assertEqual(result, 1)
+
+    def test_count_pr_comments_no_pull_request(self):
+        """Test that None is returned when no pull request is provided."""
+        mock_issue = MagicMock()
+        result = count_pr_comments(mock_issue, None, [])
+        self.assertIsNone(result)
+
+    def test_count_pr_comments_no_issue(self):
+        """Test that None is returned when no issue is provided."""
+        mock_pull_request = MagicMock()
+        result = count_pr_comments(None, mock_pull_request, [])
+        self.assertIsNone(result)
+
+    def test_count_pr_comments_exception_handling(self):
+        """Test that exceptions are handled gracefully."""
+        # Mock issue that raises exception
+        mock_issue = MagicMock()
+        mock_issue.issue.comments.side_effect = AttributeError("No comments")
+
+        mock_pull_request = MagicMock()
+        mock_pull_request.review_comments.side_effect = AttributeError(
+            "No review comments"
+        )
+
+        result = count_pr_comments(mock_issue, mock_pull_request, [])
+        self.assertEqual(result, 0)
+
+
+class TestGetStatsPRComments(unittest.TestCase):
+    """Test the get_stats_pr_comments function."""
+
+    def test_get_stats_pr_comments_with_data(self):
+        """Test calculating PR comment statistics with data."""
+        issues_with_metrics = [
+            IssueWithMetrics("PR 1", "url1", "user1", pr_comment_count=5),
+            IssueWithMetrics("PR 2", "url2", "user2", pr_comment_count=10),
+            IssueWithMetrics("PR 3", "url3", "user3", pr_comment_count=3),
+            IssueWithMetrics("Issue 1", "url4", "user4"),  # No comment count (not a PR)
+        ]
+
+        result = get_stats_pr_comments(issues_with_metrics)
+
+        self.assertIsNotNone(result)
+        self.assertEqual(result["avg"], 6.0)  # (5+10+3)/3
+        self.assertEqual(result["med"], 5.0)
+        self.assertEqual(result["90p"], 9.0)  # 90th percentile
+
+    def test_get_stats_pr_comments_no_data(self):
+        """Test calculating PR comment statistics with no PR data."""
+        issues_with_metrics = [
+            IssueWithMetrics("Issue 1", "url1", "user1"),  # No comment count
+            IssueWithMetrics("Issue 2", "url2", "user2"),  # No comment count
+        ]
+
+        result = get_stats_pr_comments(issues_with_metrics)
+        self.assertIsNone(result)
+
+    def test_get_stats_pr_comments_empty_list(self):
+        """Test calculating PR comment statistics with empty list."""
+        result = get_stats_pr_comments([])
+        self.assertIsNone(result)
+
+
+if __name__ == "__main__":
+    unittest.main()
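The expected values in `test_get_stats_pr_comments_with_data` follow directly from numpy's defaults (the percentile uses linear interpolation), which a quick check confirms:

```python
import numpy

counts = [5, 10, 3]  # the pr_comment_count values used in the test

print(numpy.round(numpy.average(counts), 1))         # 6.0 -> (5 + 10 + 3) / 3
print(numpy.round(numpy.median(counts), 1))          # 5.0 -> middle of sorted [3, 5, 10]
print(numpy.round(numpy.percentile(counts, 90), 1))  # 9.0 -> 5 + 0.8 * (10 - 5)
```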