Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 37 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ The action will run automatically on new and reopened pull requests, analyzing t
- **agent-scan-comment** (optional): Enable/disable posting comments on PRs (default: true). Set to false if you only want to use the outputs
- **cache-path** (optional): Path to cache directory for storing analysis results (e.g., `.agentscan-cache`). When provided, analysis results are cached and reused within the TTL period
- **skip-comment-on-organic** (optional): Skip posting PR comment if analysis result is "organic" (default: false)
- **ai-analysis** (optional): Enable AI-powered analysis using GitHub Models to assess if the user is automated (default: false). Requires `models: read` permission in your workflow
- **ai-model** (optional): The GitHub Models model to use for AI analysis (default: `openai/gpt-4o-mini`)

### Skip Members

Expand Down Expand Up @@ -112,6 +114,41 @@ To disable all PR comments and only use the action's outputs, set `agent-scan-co

This is useful if you want to use the analysis outputs in downstream steps without posting comments.

### AI Analysis

Enable AI-powered analysis using [GitHub Models](https://docs.github.com/en/github-models) to get an AI assessment of whether the user appears automated. This uses the GitHub Models inference API and requires the `models: read` permission.

```yaml
name: AgentScan

on:
pull_request_target:
types: [opened, reopened]

permissions:
pull-requests: write
contents: read
models: read

jobs:
agentscan:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: AgentScan
uses: MatteoGabriele/agentscan-action@v1.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
ai-analysis: true
ai-model: "openai/gpt-4o-mini" # optional, this is the default
```

When enabled, the AI assessment is:
- Included in the PR comment under an **AI Assessment** section
- Available via the `ai-assessment` output for use in downstream steps

The AI analysis is non-blocking — if the request fails, the action warns and continues with the heuristic-only analysis.

## Testing

Run tests with vitest:
Expand Down
10 changes: 10 additions & 0 deletions action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,14 @@ inputs:
description: "Path to cache directory (e.g., '.agentscan-cache') - if provided, analysis results will be cached"
required: false
default: ""
ai-analysis:
description: "Enable AI-powered analysis using GitHub Models to assess if the user is automated. Requires 'models: read' permission in your workflow."
required: false
default: "false"
ai-model:
description: "The GitHub Models model to use for AI analysis (e.g., 'openai/gpt-4o-mini', 'mistral-ai/ministral-3b')"
required: false
default: "openai/gpt-4o-mini"
outputs:
flagged:
description: "Whether the account was flagged"
Expand All @@ -36,6 +44,8 @@ outputs:
description: "Account age in days"
username:
description: "The GitHub username that was analyzed"
ai-assessment:
description: "JSON object with AI classification, confidence (0-100), and reasoning (only when ai-analysis is enabled)"
branding:
icon: "activity"
color: "white"
Expand Down
25 changes: 7 additions & 18 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
"dependencies": {
"@actions/core": "^3.0.0",
"@actions/github": "^9.0.0",
"voight-kampff-compactor": "^1.0.0",
"voight-kampff-test": "^2.5.0"
}
}
131 changes: 131 additions & 0 deletions src/ai.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
// Unit tests for getAIAnalysis, which sends a compacted user-activity payload
// to the GitHub Models chat-completions endpoint and parses the AI verdict.
// vi.mock is hoisted by vitest above the imports, so `compactor` below is the
// auto-mocked version, not the real implementation.
vi.mock("voight-kampff-compactor");

import { compactor } from "voight-kampff-compactor";
import { getAIAnalysis, type AIAnalysisInput, type AIAnalysisResult } from "./ai";

describe("getAIAnalysis", () => {
  // Shared fixture: a minimal, valid input. Individual tests spread over it
  // to override only the fields they care about.
  const baseInput: AIAnalysisInput = {
    token: "test-token",
    model: "openai/gpt-4o-mini",
    username: "test-user",
    analysis: {
      classification: "organic",
      score: 20,
      flags: [{ label: "Test Flag", points: 10, detail: "This is a test flag" }],
      profile: { age: 365, repos: 10 },
    },
    accountCreatedAt: "2020-01-01T00:00:00Z",
    publicRepos: 10,
    events: [],
  };

  // Canned structured verdict the fake model returns in the happy-path tests.
  const mockAIResponse: AIAnalysisResult = {
    classification: "organic",
    confidence: 85,
    reasoning: "This is a genuine human account.",
  };

  beforeEach(() => {
    // Reset call history between tests, then re-prime the mocked compactor
    // with a deterministic compacted string (cleared mocks return undefined
    // otherwise).
    vi.clearAllMocks();
    vi.mocked(compactor).mockReturnValue("l:test-user|ca:0101|pr:10");
  });

  afterEach(() => {
    // Undo the per-test fetch spies so globalThis.fetch is restored.
    vi.restoreAllMocks();
  });

  it("should call GitHub Models API with correct parameters", async () => {
    // Fake a successful completion whose message content is the JSON verdict.
    const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue(
      new Response(
        JSON.stringify({
          choices: [{ message: { content: JSON.stringify(mockAIResponse) } }],
        }),
      ),
    );

    await getAIAnalysis(baseInput);

    // Endpoint, method, and auth/content headers must match the Models API.
    expect(fetchSpy).toHaveBeenCalledWith(
      "https://models.github.ai/inference/chat/completions",
      expect.objectContaining({
        method: "POST",
        headers: {
          Authorization: "Bearer test-token",
          "Content-Type": "application/json",
        },
      }),
    );

    // Inspect the serialized request body: model passthrough, a system + user
    // message pair, and the fixed sampling temperature.
    const body = JSON.parse(fetchSpy.mock.calls[0][1]!.body as string);
    expect(body.model).toBe("openai/gpt-4o-mini");
    expect(body.messages).toHaveLength(2);
    expect(body.messages[0].role).toBe("system");
    expect(body.messages[1].role).toBe("user");
    expect(body.temperature).toBe(0.3);
  });

  it("should return parsed AI response as structured object", async () => {
    vi.spyOn(globalThis, "fetch").mockResolvedValue(
      new Response(
        JSON.stringify({
          choices: [{ message: { content: JSON.stringify(mockAIResponse) } }],
        }),
      ),
    );

    // The JSON string inside choices[0].message.content is parsed back into
    // the AIAnalysisResult shape.
    const result = await getAIAnalysis(baseInput);
    expect(result).toEqual(mockAIResponse);
  });

  it("should return null when response has no content", async () => {
    // Empty choices array → no message content → null, not a throw.
    vi.spyOn(globalThis, "fetch").mockResolvedValue(
      new Response(JSON.stringify({ choices: [] })),
    );

    const result = await getAIAnalysis(baseInput);
    expect(result).toBeNull();
  });

  it("should throw on non-ok response", async () => {
    // HTTP errors surface as a rejection carrying status + statusText, which
    // the caller treats as non-blocking (warn and continue).
    vi.spyOn(globalThis, "fetch").mockResolvedValue(
      new Response("Forbidden", { status: 403, statusText: "Forbidden" }),
    );

    await expect(getAIAnalysis(baseInput)).rejects.toThrow("403 Forbidden");
  });

  it("should use compactor to compact all input data", async () => {
    vi.spyOn(globalThis, "fetch").mockResolvedValue(
      new Response(
        JSON.stringify({
          choices: [{ message: { content: JSON.stringify(mockAIResponse) } }],
        }),
      ),
    );

    await getAIAnalysis({
      ...baseInput,
      events: [{ type: "PushEvent", created_at: "2024-03-01" }],
    });

    // The compactor receives a serialized payload that includes the username;
    // the exact serialization format is an implementation detail of ai.ts.
    expect(compactor).toHaveBeenCalledWith(
      expect.stringContaining('"test-user"'),
    );
  });

  it("should include compacted data in the user prompt", async () => {
    // Override the compactor output so we can assert it is forwarded verbatim.
    vi.mocked(compactor).mockReturnValue("compacted-data-here");
    const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue(
      new Response(
        JSON.stringify({
          choices: [{ message: { content: JSON.stringify(mockAIResponse) } }],
        }),
      ),
    );

    await getAIAnalysis(baseInput);

    // The compacted string must land in the user-role message sent to the
    // model, not just be computed and dropped.
    const body = JSON.parse(fetchSpy.mock.calls[0][1]!.body as string);
    expect(body.messages[1].content).toContain("compacted-data-here");
  });
});
Loading