diff --git a/README.md b/README.md index 9da7d20..7814fe0 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,8 @@ The action will run automatically on new and reopened pull requests, analyzing t - **agent-scan-comment** (optional): Enable/disable posting comments on PRs (default: true). Set to false if you only want to use the outputs - **cache-path** (optional): Path to cache directory for storing analysis results (e.g., `.agentscan-cache`). When provided, analysis results are cached and reused within the TTL period - **skip-comment-on-organic** (optional): Skip posting PR comment if analysis result is "organic" (default: false) +- **ai-analysis** (optional): Enable AI-powered analysis using GitHub Models to assess if the user is automated (default: false). Requires `models: read` permission in your workflow +- **ai-model** (optional): The GitHub Models model to use for AI analysis (default: `openai/gpt-4o-mini`) ### Skip Members @@ -112,6 +114,41 @@ To disable all PR comments and only use the action's outputs, set `agent-scan-co This is useful if you want to use the analysis outputs in downstream steps without posting comments. +### AI Analysis + +Enable AI-powered analysis using [GitHub Models](https://docs.github.com/en/github-models) to get an AI assessment of whether the user appears automated. This uses the GitHub Models inference API and requires the `models: read` permission. 
+ +```yaml +name: AgentScan + +on: + pull_request_target: + types: [opened, reopened] + +permissions: + pull-requests: write + contents: read + models: read + +jobs: + agentscan: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: AgentScan + uses: MatteoGabriele/agentscan-action@v1.0.1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + ai-analysis: true + ai-model: "openai/gpt-4o-mini" # optional, this is the default +``` + +When enabled, the AI assessment is: +- Included in the PR comment under an **AI Assessment** section +- Available via the `ai-assessment` output for use in downstream steps + +The AI analysis is non-blocking — if the request fails, the action warns and continues with the heuristic-only analysis. + ## Testing Run tests with vitest: diff --git a/action.yml b/action.yml index bbb90c9..6150fc1 100644 --- a/action.yml +++ b/action.yml @@ -21,6 +21,14 @@ inputs: description: "Path to cache directory (e.g., '.agentscan-cache') - if provided, analysis results will be cached" required: false default: "" + ai-analysis: + description: "Enable AI-powered analysis using GitHub Models to assess if the user is automated. Requires 'models: read' permission in your workflow." 
+ required: false + default: "false" + ai-model: + description: "The GitHub Models model to use for AI analysis (e.g., 'openai/gpt-4o-mini', 'mistral-ai/ministral-3b')" + required: false + default: "openai/gpt-4o-mini" outputs: flagged: description: "Whether the account was flagged" @@ -36,6 +44,8 @@ outputs: description: "Account age in days" username: description: "The GitHub username that was analyzed" + ai-assessment: + description: "JSON object with AI classification, confidence (0-100), and reasoning (only when ai-analysis is enabled)" branding: icon: "activity" color: "white" diff --git a/package-lock.json b/package-lock.json index e7738ce..c4ebcb9 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,6 +11,7 @@ "dependencies": { "@actions/core": "^3.0.0", "@actions/github": "^9.0.0", + "voight-kampff-compactor": "^1.0.0", "voight-kampff-test": "^2.5.0" }, "devDependencies": { @@ -874,9 +875,6 @@ "arm64" ], "dev": true, - "libc": [ - "glibc" - ], "license": "MIT", "optional": true, "os": [ @@ -894,9 +892,6 @@ "arm64" ], "dev": true, - "libc": [ - "musl" - ], "license": "MIT", "optional": true, "os": [ @@ -914,9 +909,6 @@ "ppc64" ], "dev": true, - "libc": [ - "glibc" - ], "license": "MIT", "optional": true, "os": [ @@ -934,9 +926,6 @@ "s390x" ], "dev": true, - "libc": [ - "glibc" - ], "license": "MIT", "optional": true, "os": [ @@ -954,9 +943,6 @@ "x64" ], "dev": true, - "libc": [ - "glibc" - ], "license": "MIT", "optional": true, "os": [ @@ -974,9 +960,6 @@ "x64" ], "dev": true, - "libc": [ - "musl" - ], "license": "MIT", "optional": true, "os": [ @@ -2743,6 +2726,12 @@ "dev": true, "license": "MIT" }, + "node_modules/voight-kampff-compactor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/voight-kampff-compactor/-/voight-kampff-compactor-1.0.0.tgz", + "integrity": "sha512-2YRQUuD4XoGTVGbjc2NUbDVJKWt3x4ZhTW5MCvUhdDxQSMdulXY6bfK+yiB0WtZod1fYiImwHntOjxheQuEsEA==", + "license": "MIT" + }, "node_modules/voight-kampff-test": { 
"version": "2.5.0", "resolved": "https://registry.npmjs.org/voight-kampff-test/-/voight-kampff-test-2.5.0.tgz", diff --git a/package.json b/package.json index 06760ee..0089745 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,7 @@ "dependencies": { "@actions/core": "^3.0.0", "@actions/github": "^9.0.0", + "voight-kampff-compactor": "^1.0.0", "voight-kampff-test": "^2.5.0" } } diff --git a/src/ai.test.ts b/src/ai.test.ts new file mode 100644 index 0000000..8408844 --- /dev/null +++ b/src/ai.test.ts @@ -0,0 +1,131 @@ +vi.mock("voight-kampff-compactor"); + +import { compactor } from "voight-kampff-compactor"; +import { getAIAnalysis, type AIAnalysisInput, type AIAnalysisResult } from "./ai"; + +describe("getAIAnalysis", () => { + const baseInput: AIAnalysisInput = { + token: "test-token", + model: "openai/gpt-4o-mini", + username: "test-user", + analysis: { + classification: "organic", + score: 20, + flags: [{ label: "Test Flag", points: 10, detail: "This is a test flag" }], + profile: { age: 365, repos: 10 }, + }, + accountCreatedAt: "2020-01-01T00:00:00Z", + publicRepos: 10, + events: [], + }; + + const mockAIResponse: AIAnalysisResult = { + classification: "organic", + confidence: 85, + reasoning: "This is a genuine human account.", + }; + + beforeEach(() => { + vi.clearAllMocks(); + vi.mocked(compactor).mockReturnValue("l:test-user|ca:0101|pr:10"); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("should call GitHub Models API with correct parameters", async () => { + const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue( + new Response( + JSON.stringify({ + choices: [{ message: { content: JSON.stringify(mockAIResponse) } }], + }), + ), + ); + + await getAIAnalysis(baseInput); + + expect(fetchSpy).toHaveBeenCalledWith( + "https://models.github.ai/inference/chat/completions", + expect.objectContaining({ + method: "POST", + headers: { + Authorization: "Bearer test-token", + "Content-Type": "application/json", + }, + }), + ); + + 
const body = JSON.parse(fetchSpy.mock.calls[0][1]!.body as string); + expect(body.model).toBe("openai/gpt-4o-mini"); + expect(body.messages).toHaveLength(2); + expect(body.messages[0].role).toBe("system"); + expect(body.messages[1].role).toBe("user"); + expect(body.temperature).toBe(0.3); + }); + + it("should return parsed AI response as structured object", async () => { + vi.spyOn(globalThis, "fetch").mockResolvedValue( + new Response( + JSON.stringify({ + choices: [{ message: { content: JSON.stringify(mockAIResponse) } }], + }), + ), + ); + + const result = await getAIAnalysis(baseInput); + expect(result).toEqual(mockAIResponse); + }); + + it("should return null when response has no content", async () => { + vi.spyOn(globalThis, "fetch").mockResolvedValue( + new Response(JSON.stringify({ choices: [] })), + ); + + const result = await getAIAnalysis(baseInput); + expect(result).toBeNull(); + }); + + it("should throw on non-ok response", async () => { + vi.spyOn(globalThis, "fetch").mockResolvedValue( + new Response("Forbidden", { status: 403, statusText: "Forbidden" }), + ); + + await expect(getAIAnalysis(baseInput)).rejects.toThrow("403 Forbidden"); + }); + + it("should use compactor to compact all input data", async () => { + vi.spyOn(globalThis, "fetch").mockResolvedValue( + new Response( + JSON.stringify({ + choices: [{ message: { content: JSON.stringify(mockAIResponse) } }], + }), + ), + ); + + await getAIAnalysis({ + ...baseInput, + events: [{ type: "PushEvent", created_at: "2024-03-01" }], + }); + + expect(compactor).toHaveBeenCalledWith( + expect.stringContaining('"test-user"'), + ); + }); + + it("should include compacted data in the user prompt", async () => { + vi.mocked(compactor).mockReturnValue("compacted-data-here"); + const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue( + new Response( + JSON.stringify({ + choices: [{ message: { content: JSON.stringify(mockAIResponse) } }], + }), + ), + ); + + await getAIAnalysis(baseInput); + + const 
body = JSON.parse(fetchSpy.mock.calls[0][1]!.body as string); + expect(body.messages[1].content).toContain("compacted-data-here"); + }); +}); diff --git a/src/ai.ts b/src/ai.ts new file mode 100644 index 0000000..44f34af --- /dev/null +++ b/src/ai.ts @@ -0,0 +1,216 @@ +import type { IdentifyReplicantResult } from "voight-kampff-test"; +import { compactor } from "voight-kampff-compactor"; + +export type AIAnalysisInput = { + token: string; + model: string; + username: string; + analysis: IdentifyReplicantResult; + accountCreatedAt: string; + publicRepos: number; + events: Record<string, unknown>[]; +}; + +export type AIAnalysisResult = { + classification: "organic" | "mixed" | "automation"; + confidence: number; + reasoning: string; +}; + +export const SYSTEM_PROMPT = [ + "You are an expert AI system designed to analyze GitHub user accounts and classify them as human-operated (\"organic\"), bot/automated (\"automation\"), or mixed behavior patterns.", + "", + "## Your Task", + "Analyze a GitHub user's activity data (account metadata and event history) and return a classification with supporting evidence.", + "", + "## Input Data Structure", + "- user.login: GitHub username", + "- user.created_at: ISO 8601 date string (account creation time)", + "- user.public_repos: number of public repositories owned", + "- events: array of GitHub events with type, created_at, repo.name, payload", + " NOTE: Events are limited to the most recent 100 public events from the GitHub API. This is NOT the user's complete activity history — draw conclusions accordingly and avoid absolute statements about total activity.", + "", + "## Classification Categories", + '- **organic**: Human-operated account (low bot-like signals, score >= 70)', + '- **mixed**: Uncertain patterns (moderate bot-like signals, score 50-69)', + '- **automation**: Likely bot-operated (strong bot-like signals, score < 50)', + "", + "## Analysis Framework", + "", + "Evaluate each pattern independently. 
Assign a score per flag reflecting severity of the detected behavior (low, medium, high).", + "", + "### 1. Account Age Context", + "- New account (< 30 days): Apply stricter scrutiny for bot patterns", + "- Young account (30-89 days): Moderate scrutiny, evaluate patterns carefully", + "- Established account (>= 90 days): Higher tolerance for activity volume", + "", + "### 2. Repository Activity Baseline", + "- Account has no personal repos but 20+ events: Suspicious pattern", + "- 95%+ external activity with < 5 personal repos: No personal investment", + "", + "### 3. Bot-Like Pattern Detection (11 patterns to evaluate)", + "", + "#### A. Rapid Repository Creation", + 'Detect CreateEvent (ref_type="repository") clustering in 24 hours.', + "Pattern: Rapid-fire repo creation suggests automation.", + "", + "#### B. Fork Surge", + "Detect ForkEvent clustering in 24-hour window.", + "Pattern: Concentrated forking activity suggests bot behavior.", + "", + "#### C. Commit Burst", + "Detect PushEvent clustering in 1-hour window.", + "Pattern: Inhuman commit velocity or ultra-tight clustering (seconds apart).", + "", + "#### D. 24/7 Activity Pattern", + "Analyze each calendar day: activity spanning 21+ unique hours with minimal rest suggests no sleep.", + "Pattern: Sustained multi-day coding without realistic sleep windows. Over 2 days indicates strong bot signals, especially if combined with other patterns.", + "", + "#### E. Event Type Diversity (Shannon's Entropy)", + "Calculate normalized Shannon entropy of event types:", + "- Entropy = -sum(p * log2(p)) for each type's probability p", + "- Normalized entropy = Entropy / log2(number_of_types)", + "- Low entropy (< 0.5): Bot-like concentrated profile", + "- High entropy (> 0.8): Suspicious uniform distribution across types", + "", + "Pattern: Either narrow rigid focus (few types) OR artificial cycling through all types, combined with no human interactions (comments, reviews, watches).", + "", + "#### F. 
Issue Comment Spam", + "Detect IssueCommentEvent clustering across many repos within 30-minute window.", + "Pattern: Rapid-fire commenting across unrelated repos suggests automation.", + "", + "#### G. Branch -> Pull Request Correlation", + "Detect pattern: branch created -> PR opened within window, repeated consistently.", + "Pattern: Mechanical CI/CD automation cycling (not typical human workflow).", + "", + "#### H. PR Volume", + "Detect PR bursts to external repos (young accounts only, < 90 days).", + "Pattern: High external PR volume without personal repo activity.", + "", + "#### I. Consecutive Days Activity", + "Count calendar days with any activity.", + "Pattern: 21+ consecutive days suggests either dedication or tireless bot.", + "", + "#### J. External Repo Spread", + "Count unique external repos (young accounts only, < 90 days).", + "Pattern: Contributing to many different external repos broadly suggests spray-and-pray behavior.", + "", + "#### K. Daily Coding Hour Distribution", + "Analyze hour spread within each calendar day separately.", + "Pattern: High entropy (>0.8) across 16+ hours in a day suggests automated activity cycling.", + "", + "## Scoring Methodology", + "Evaluate all detected patterns independently. 
For each flag present, assign a severity-based score (0-100 scale per flag).", + "Calculate final confidence score as: average of severity assessments across all flags, weighted by pattern significance.", + "", + "- Extreme automated signals: 0-20 (strong bot indicators)", + "- High bot-like behavior: 20-40 (multiple suspicious patterns)", + "- Moderate concerns: 40-60 (mixed or ambiguous signals)", + "- Low concerns: 60-80 (mostly human-like with isolated flags)", + "- Confident human: 80-100 (organic patterns throughout)", + "", + "## Time Window Analysis Rules", + "- 24-hour rolling windows: sliding analysis for clustering patterns", + "- Per-day analysis: evaluate each calendar day independently (not globally)", + "- All times treated as UTC", + "", + "## Return JSON Format (MUST be valid JSON only)", + '{', + ' "classification": "organic" | "mixed" | "automation",', + ' "confidence": number (0-100),', + ' "reasoning": "string (2-3 sentences of concise, evidence-based assessment)"', + '}', + "", + "## Output Requirements", + "- Evaluate patterns independently without predetermined point mappings", + "- Assign severity per flag based on strength of evidence", + "- Provide specific evidence in the reasoning (actual counts, timeframes, observed behaviors)", + "- Return ONLY valid JSON - no markdown, no extra text, no code fences", + "- Be precise. Focus on evidence-based assessment, not fixed rubrics.", +].join("\n"); + +function slimEvents(events: Record<string, unknown>[]) { + return events.map((e) => { + const payload = (e.payload ?? {}) as Record<string, unknown>; + return { + type: e.type, + created_at: e.created_at, + repo: (e.repo as { name?: string })?.name, + action: payload.action, + ref: payload.ref, + ref_type: payload.ref_type, + size: payload.size, + commits: Array.isArray(payload.commits) ? 
payload.commits.length : undefined, + }; + }); +} + +export function buildUserPrompt(input: AIAnalysisInput): string { + const compactedData = compactor( + JSON.stringify({ + user: { + login: input.username, + created_at: input.accountCreatedAt, + public_repos: input.publicRepos, + }, + heuristic: { + score: input.analysis.score, + classification: input.analysis.classification, + flags: input.analysis.flags, + }, + events: slimEvents(input.events), + }), + ); + + return `Here is the data to analyze: ${compactedData}`; +} + +export async function getAIAnalysis({ + token, + model, + username, + analysis, + accountCreatedAt, + publicRepos, + events, +}: AIAnalysisInput): Promise<AIAnalysisResult | null> { + const prompt = buildUserPrompt({ token, model, username, analysis, accountCreatedAt, publicRepos, events }); + + const response = await fetch( + "https://models.github.ai/inference/chat/completions", + { + method: "POST", + headers: { + Authorization: `Bearer ${token}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + model, + messages: [ + { role: "system", content: SYSTEM_PROMPT }, + { role: "user", content: prompt }, + ], + temperature: 0.3, + }), + }, + ); + + if (!response.ok) { + const body = await response.text(); + throw new Error(`${response.status} ${response.statusText}: ${body}`); + } + + const data = (await response.json()) as { + choices?: { message?: { content?: string } }[]; + }; + let content = data.choices?.[0]?.message?.content?.trim() ?? null; + if (!content) return null; + + content = content + // remove DeepSeek-R1 reasoning markers if present + // (reasoning models wrap chain-of-thought in these tags before the JSON answer)
+ .replace(/<think>[\s\S]*?<\/think>/g, "").trim(); + + // TODO: validate the parsed structure (e.g. with zod) before trusting the cast + return JSON.parse(content) as AIAnalysisResult; +} diff --git a/src/index.test.ts b/src/index.test.ts index e48dff7..b7c8b38 100644 --- a/src/index.test.ts +++ b/src/index.test.ts @@ -4,6 +4,7 @@ import { rmSync } from "fs"; vi.mock("@actions/core"); vi.mock("@actions/github"); vi.mock("voight-kampff-test"); +vi.mock("./ai"); import * as core from "@actions/core"; import * as github from "@actions/github"; @@ -11,6 +12,7 @@ import { identifyReplicant, getClassificationDetails, } from "voight-kampff-test"; +import { getAIAnalysis, type AIAnalysisResult } from "./ai"; import { run } from "./index"; describe("AgentScan Action", () => { @@ -35,6 +37,9 @@ describe("AgentScan Action", () => { "skip-members": "", "cache-path": "", "skip-comment-on-organic": "false", + "agent-scan-comment": "true", + "ai-analysis": "false", + "ai-model": "openai/gpt-4o-mini", }; const config = { ...defaults, ...overrides }; @@ -101,6 +106,7 @@ describe("AgentScan Action", () => { label: "Organic Account", description: "This account appears to be organic.", }); + vi.mocked(getAIAnalysis).mockResolvedValue(null); vi.mocked(core.setOutput).mockImplementation(() => {}); }; @@ -371,4 +377,133 @@ describe("AgentScan Action", () => { }); }); }); + + describe("AI Analysis - GitHub Models integration", () => { + beforeEach(() => { + setupContext(); + setupCommonMocks(); + vi.mocked(github.getOctokit).mockReturnValue(createMockOctokit() as any); + }); + + it("should not call AI when ai-analysis is disabled", async () => { + setupInputs({ "ai-analysis": "false" }); + + await run(); + + expect(getAIAnalysis).not.toHaveBeenCalled(); + expect(core.setOutput).not.toHaveBeenCalledWith( + "ai-assessment", + expect.anything(), + ); + }); + + it("should call AI and set output when ai-analysis is enabled", async () => { + setupInputs({ "ai-analysis": "true", "ai-model": 
"openai/gpt-4o-mini" }); + const mockResult: AIAnalysisResult = { + classification: "organic", + confidence: 90, + reasoning: "This account appears organic based on the profile data.", + }; + vi.mocked(getAIAnalysis).mockResolvedValue(mockResult); + + await run(); + + expect(getAIAnalysis).toHaveBeenCalledWith( + expect.objectContaining({ + token: "test-token", + model: "openai/gpt-4o-mini", + username: "test-user", + }), + ); + expect(core.setOutput).toHaveBeenCalledWith( + "ai-assessment", + JSON.stringify(mockResult), + ); + expect(core.info).toHaveBeenCalledWith("AI analysis completed"); + }); + + it("should include AI assessment in PR comment", async () => { + setupInputs({ "ai-analysis": "true", "agent-scan-comment": "true" }); + vi.mocked(getAIAnalysis).mockResolvedValue({ + classification: "organic", + confidence: 95, + reasoning: "Likely a genuine human user.", + }); + + await run(); + + const mockOctokit = vi.mocked(github.getOctokit).mock.results[0].value; + const commentCall = + mockOctokit.rest.issues.createComment.mock.calls[0][0]; + expect(commentCall.body).toContain("AI Assessment"); + expect(commentCall.body).toContain("organic"); + expect(commentCall.body).toContain("95% confidence"); + expect(commentCall.body).toContain("Likely a genuine human user."); + }); + + it("should warn and continue when AI analysis fails", async () => { + setupInputs({ "ai-analysis": "true" }); + vi.mocked(getAIAnalysis).mockRejectedValue( + new Error("500 Internal Server Error"), + ); + + await run(); + + expect(core.warning).toHaveBeenCalledWith( + expect.stringContaining("AI analysis failed"), + ); + expect(core.setOutput).toHaveBeenCalledWith("ai-assessment", ""); + expect(core.setOutput).toHaveBeenCalledWith("classification", "organic"); + }); + + it("should use custom AI model from input", async () => { + setupInputs({ + "ai-analysis": "true", + "ai-model": "mistral-ai/ministral-3b", + }); + vi.mocked(getAIAnalysis).mockResolvedValue({ + classification: "organic", 
+ confidence: 80, + reasoning: "Assessment result.", + }); + + await run(); + + expect(getAIAnalysis).toHaveBeenCalledWith( + expect.objectContaining({ + model: "mistral-ai/ministral-3b", + }), + ); + }); + + it("should pass events to AI analysis", async () => { + setupInputs({ "ai-analysis": "true" }); + vi.mocked(github.getOctokit).mockReturnValue( + createMockOctokit({ + activity: { + listPublicEventsForUser: vi.fn().mockResolvedValue({ + data: [ + { type: "PushEvent", created_at: "2024-03-01T00:00:00Z", repo: { name: "user/repo" } }, + ], + }), + }, + }) as any, + ); + vi.mocked(getAIAnalysis).mockResolvedValue({ + classification: "organic", + confidence: 75, + reasoning: "Assessment.", + }); + + await run(); + + expect(getAIAnalysis).toHaveBeenCalledWith( + expect.objectContaining({ + events: [ + { type: "PushEvent", created_at: "2024-03-01T00:00:00Z", repo: { name: "user/repo" } }, + ], + }), + ); + }); + }); }); diff --git a/src/index.ts b/src/index.ts index cd3ee4c..abbb51a 100644 --- a/src/index.ts +++ b/src/index.ts @@ -2,13 +2,13 @@ import * as core from "@actions/core"; import * as github from "@actions/github"; import * as fs from "fs"; import * as path from "path"; - import { identifyReplicant, getClassificationDetails, type IdentifyReplicantResult, type IdentityClassification, } from "voight-kampff-test"; +import { getAIAnalysis, type AIAnalysisResult } from "./ai"; type AutomationListItem = { username: string; @@ -33,6 +33,9 @@ async function run() { const skipCommentOnOrganic = core.getInput("skip-comment-on-organic").toLowerCase() === "true"; const cacheDir = core.getInput("cache-path"); + const aiAnalysisEnabled = + core.getInput("ai-analysis").toLowerCase() === "true"; + const aiModel = core.getInput("ai-model") || "openai/gpt-4o-mini"; const skipMembers = skipMembersInput .split(",") .map((m) => m.trim()) @@ -84,6 +87,9 @@ async function run() { let hasCommunityFlag = false; let analysis: IdentifyReplicantResult | null = null; let isFlagged = 
false; + let accountCreatedAt = ""; + let publicRepos = 0; + let userEvents: Record<string, unknown>[] = []; // Use cached analysis if available, otherwise make API calls if (cachedAnalysis) { @@ -96,6 +102,9 @@ username: username, }); + accountCreatedAt = user.created_at; + publicRepos = user.public_repos; + const { data: events } = await octokit.rest.activity.listPublicEventsForUser({ username, @@ -103,6 +112,8 @@ page: 1, }); + userEvents = events as Record<string, unknown>[]; + let verified: AutomationListItem[] = []; try { @@ -172,6 +183,28 @@ core.setOutput("account-age", analysis.profile.age); core.setOutput("username", username); + let aiAssessment: AIAnalysisResult | null = null; + if (aiAnalysisEnabled) { + try { + aiAssessment = await getAIAnalysis({ + token, + model: aiModel, + username, + analysis, + accountCreatedAt, + publicRepos, + events: userEvents, + }); + core.setOutput("ai-assessment", aiAssessment ? JSON.stringify(aiAssessment) : ""); + core.info("AI analysis completed"); + } catch (error) { + core.warning( + `AI analysis failed: ${String(error)}. Continuing without AI assessment.`, + ); + core.setOutput("ai-assessment", ""); + } + } + // Skip commenting if analysis is organic and skip-comment-on-organic is enabled if ( skipCommentOnOrganic && @@ -203,13 +236,16 @@ try { if (core.getInput("agent-scan-comment") === "true") { + const aiSection = aiAssessment ? `\n\n#### 🤖 AI Assessment\n**${aiAssessment.classification}** (${aiAssessment.confidence}% confidence) — ${aiAssessment.reasoning}` + : ""; + await octokit.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, body: `### ${indicator} ${details.label} -${details.description} +${details.description}${aiSection} [View full analysis →](https://agentscan.netlify.app/user/${username})