diff --git a/README.md b/README.md index a0fa9dd..a327acd 100644 --- a/README.md +++ b/README.md @@ -334,12 +334,13 @@ See [Packs Guide](docs/guides/packs.md) for detailed pack creation and best prac greybeard works with any LLM backend. Configure once with `greybeard init`: -| Backend | How | What You Need | -| ----------- | ------------- | -------------------------------------------------- | -| `openai` | OpenAI API | `OPENAI_API_KEY` | -| `anthropic` | Anthropic API | `ANTHROPIC_API_KEY` + `greybeard[anthropic]` extra | -| `ollama` | Local (free) | [Ollama](https://ollama.ai) running locally | -| `lmstudio` | Local (free) | [LM Studio](https://lmstudio.ai) server running | +| Backend | How | What You Need | +| ----------- | --------------------- | -------------------------------------------------- | +| `openai` | OpenAI API | `OPENAI_API_KEY` | +| `anthropic` | Anthropic API | `ANTHROPIC_API_KEY` + `greybeard[anthropic]` extra | +| `copilot` | GitHub Copilot API | `GITHUB_TOKEN` (GitHub PAT or CLI token) | +| `ollama` | Local (free) | [Ollama](https://ollama.ai) running locally | +| `lmstudio` | Local (free) | [LM Studio](https://lmstudio.ai) server running | ### Configure Your Backend diff --git a/docs/guides/backends.md b/docs/guides/backends.md index 3d61bc2..b61f364 100644 --- a/docs/guides/backends.md +++ b/docs/guides/backends.md @@ -49,6 +49,48 @@ greybeard config set llm.model claude-3-opus-20240229 # most capable --- +## GitHub Copilot + +Uses GitHub Copilot's API endpoint at `api.githubcopilot.com/v1`. Requires a GitHub Personal Access Token or GitHub CLI authentication. + +**Setup:** + +1. Create a Personal Access Token at [github.com/settings/tokens](https://github.com/settings/tokens) with at least `copilot` scope, OR authenticate with GitHub CLI: + ```bash + gh auth login + ``` + +2. Configure greybeard: + ```bash + export GITHUB_TOKEN=ghp_... # Your GitHub token + greybeard config set llm.backend copilot + ``` + +3. 
Verify the configuration:
+   ```bash
+   greybeard config
+   ```
+
+**Default model:** `gpt-4-turbo`
+
+**Other models:** Depends on what models are available through your Copilot subscription:
+```bash
+greybeard config set llm.model gpt-4o  # If available
+greybeard config set llm.model gpt-4   # Older model
+```
+
+**Custom GitHub token env var:**
+If you want to use a different environment variable (e.g., for CI/CD):
+```bash
+export MY_GITHUB_TOKEN=ghp_...
+greybeard config set llm.api_key_env MY_GITHUB_TOKEN
+```
+
+!!! note
+    GitHub Copilot API access requires an active Copilot subscription and a GitHub Personal Access Token with appropriate permissions. Check [GitHub's Copilot documentation](https://docs.github.com/en/copilot/about-github-copilot) for current availability.
+
+---
+
 ## Ollama (local, free)
 
 Run any open-source model locally — no API key, no cost, fully offline.
diff --git a/greybeard/analyzer.py b/greybeard/analyzer.py
index 05ac9ef..1c6a77d 100644
--- a/greybeard/analyzer.py
+++ b/greybeard/analyzer.py
@@ -313,7 +313,7 @@ def _run_copilot(
 ) -> tuple[str, int, int]:
     """Run via GitHub Copilot API (OpenAI-compatible endpoint)."""
     try:
-        from openai import OpenAI
+        from openai import OpenAI, APIStatusError
     except ImportError:
         print("Error: openai package not installed. 
Run: uv pip install openai", file=sys.stderr) sys.exit(1) @@ -335,24 +335,54 @@ def _run_copilot( {"role": "user", "content": user_message}, ] - if stream: - text = _stream_openai(client, model, messages) - input_tokens = len(system_prompt.split()) + len(user_message.split()) - output_tokens = len(text.split()) - return text, input_tokens, output_tokens - else: - resp = client.chat.completions.create( - model=model, - messages=messages, # type: ignore[arg-type] - stream=False, - ) - text = resp.choices[0].message.content or "" # type: ignore[union-attr] - usage = resp.usage # type: ignore[union-attr] - return ( - text, - (usage.prompt_tokens if usage else 0), - (usage.completion_tokens if usage else 0), - ) + try: + if stream: + text = _stream_openai(client, model, messages) + input_tokens = len(system_prompt.split()) + len(user_message.split()) + output_tokens = len(text.split()) + return text, input_tokens, output_tokens + else: + resp = client.chat.completions.create( + model=model, + messages=messages, # type: ignore[arg-type] + stream=False, + ) + text = resp.choices[0].message.content or "" # type: ignore[union-attr] + usage = resp.usage # type: ignore[union-attr] + return ( + text, + (usage.prompt_tokens if usage else 0), + (usage.completion_tokens if usage else 0), + ) + except APIStatusError as e: + if e.status_code == 404: + print( + f"Error: GitHub Copilot API endpoint not found (404).\n" + f"Check that the base URL is correct: {base_url}\n" + f"Ensure your GitHub token is valid and has access to Copilot.", + file=sys.stderr, + ) + elif e.status_code == 401: + print( + f"Error: GitHub Copilot authentication failed (401).\n" + f"Your GitHub token is invalid or expired.\n" + f"Please update your {llm.resolved_api_key_env()} token.", + file=sys.stderr, + ) + elif e.status_code == 403: + print( + f"Error: GitHub Copilot access forbidden (403).\n" + f"Your GitHub account may not have Copilot access.\n" + f"Check your GitHub subscription and Copilot 
availability.", + file=sys.stderr, + ) + else: + print( + f"Error: GitHub Copilot API error ({e.status_code}).\n" + f"Details: {e.message}", + file=sys.stderr, + ) + sys.exit(1) def _stream_openai(client: object, model: str, messages: list[dict[str, str]]) -> str: diff --git a/greybeard/backends/copilot.py b/greybeard/backends/copilot.py new file mode 100644 index 0000000..e88d4a3 --- /dev/null +++ b/greybeard/backends/copilot.py @@ -0,0 +1,81 @@ +"""GitHub Copilot LLM backend implementation. + +Routes to api.githubcopilot.com/v1 using OpenAI-compatible API. +Requires GitHub authentication token (PAT or GitHub CLI token). +""" + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from greybeard.config import LLMConfig + + +class CopilotBackend: + """GitHub Copilot backend for OpenAI-compatible API access. + + GitHub Copilot uses the OpenAI-compatible API endpoint at + api.githubcopilot.com/v1. Authentication requires a GitHub token + (personal access token or GitHub CLI token). + + Attributes: + base_url: The GitHub Copilot API endpoint URL. + api_key_env: Environment variable name for the GitHub token. + """ + + base_url = "https://api.githubcopilot.com/v1" + api_key_env = "GITHUB_TOKEN" + + def __init__(self, github_token: str | None = None) -> None: + """Initialize Copilot backend. + + Args: + github_token: GitHub token. If not provided, reads from GITHUB_TOKEN env var. + + Raises: + ValueError: If no token is provided and GITHUB_TOKEN env var is not set. + """ + token = github_token or os.getenv(self.api_key_env) + if not token: + raise ValueError( + f"GitHub token required. Set {self.api_key_env} env var or pass github_token." 
+ ) + self.token: str = token + + @property + def api_key(self) -> str: + """Get the API key (GitHub token).""" + return self.token + + @staticmethod + def get_api_key_env_var() -> str: + """Get the environment variable name for API key.""" + return CopilotBackend.api_key_env + + @staticmethod + def get_base_url() -> str: + """Get the base URL for Copilot API.""" + return CopilotBackend.base_url + + +def get_copilot_backend(config: LLMConfig) -> CopilotBackend: + """Factory function to create a Copilot backend from LLMConfig. + + Args: + config: LLMConfig with optional api_key_env override. + + Returns: + Initialized CopilotBackend instance. + + Raises: + ValueError: If no GitHub token is available. + """ + api_key_env = config.api_key_env or CopilotBackend.api_key_env + token = os.getenv(api_key_env) + if not token: + raise ValueError( + f"GitHub token required. Set {api_key_env} env var or configure it in greybeard." + ) + return CopilotBackend(github_token=token) diff --git a/greybeard/cli.py b/greybeard/cli.py index a89b704..7ce518b 100644 --- a/greybeard/cli.py +++ b/greybeard/cli.py @@ -165,6 +165,12 @@ def cli() -> None: ) @click.option("--context", "-c", default="", help="Additional context notes.") @click.option("--model", default=None, help="Override LLM model.") +@click.option( + "--backend", + default=None, + type=click.Choice(KNOWN_BACKENDS), + help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.", +) @click.option( "--audience", "-a", @@ -196,7 +202,7 @@ def cli() -> None: help="Start interactive REPL after initial analysis.", ) def analyze( - mode, pack, repo, context, model, audience, output, fmt, save_decision_name, interactive + mode, pack, repo, context, model, backend, audience, output, fmt, save_decision_name, interactive ) -> None: r"""Analyze a decision, diff, or document. 
@@ -214,6 +220,11 @@ def analyze( git diff main | greybeard analyze --interactive --mode mentor """ cfg = GreybeardConfig.load() + + # Override backend if provided via --backend flag + if backend: + cfg.llm.backend = backend + mode = mode or cfg.default_mode pack_name = pack or cfg.default_pack @@ -285,6 +296,12 @@ def analyze( ) @click.option("--pack", "-p", default=None, help="Content pack name or path.") @click.option("--model", default=None, help="Override LLM model.") +@click.option( + "--backend", + default=None, + type=click.Choice(KNOWN_BACKENDS), + help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.", +) @click.option("--output", "-o", default=None, help="Save review to a file.") @click.option( "--format", @@ -295,7 +312,7 @@ def analyze( show_default=True, help="Output format.", ) -def self_check(context, pack, model, output, fmt) -> None: +def self_check(context, pack, model, backend, output, fmt) -> None: r"""Review your own decision before sharing it. \b @@ -304,6 +321,11 @@ def self_check(context, pack, model, output, fmt) -> None: greybeard self-check --context "migration plan" --format json --output check.json """ cfg = GreybeardConfig.load() + + # Override backend if provided via --backend flag + if backend: + cfg.llm.backend = backend + pack_name = pack or cfg.default_pack try: @@ -354,6 +376,12 @@ def self_check(context, pack, model, output, fmt) -> None: "--pack", "-p", default="mentor-mode", show_default=True, help="Content pack name or path." 
) @click.option("--model", default=None, help="Override LLM model.") +@click.option( + "--backend", + default=None, + type=click.Choice(KNOWN_BACKENDS), + help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.", +) @click.option("--output", "-o", default=None, help="Save to a file.") @click.option( "--format", @@ -370,7 +398,7 @@ def self_check(context, pack, model, output, fmt) -> None: is_flag=True, help="Start interactive REPL after initial analysis.", ) -def coach(audience, context, pack, model, output, fmt, interactive) -> None: +def coach(audience, context, pack, model, backend, output, fmt, interactive) -> None: r"""Get help communicating a concern or decision constructively. \b @@ -381,6 +409,11 @@ def coach(audience, context, pack, model, output, fmt, interactive) -> None: greybeard coach --audience team --context "shipping too fast" --interactive """ cfg = GreybeardConfig.load() + + # Override backend if provided via --backend flag + if backend: + cfg.llm.backend = backend + try: content_pack = load_pack(pack) except FileNotFoundError as e: @@ -604,7 +637,7 @@ def config_set(key: str, value: str) -> None: \b Keys: - llm.backend openai | anthropic | ollama | lmstudio + llm.backend openai | anthropic | copilot | groq | ollama | lmstudio llm.model e.g. gpt-4o, claude-3-5-sonnet-20241022, llama3.2 llm.base_url e.g. http://localhost:11434/v1 llm.api_key_env e.g. OPENAI_API_KEY @@ -1140,7 +1173,13 @@ def adr_list(repo) -> None: default=None, help="Output file path (default: batch-analysis.{format}).", ) -def batch_analyze(reviews: tuple[str, ...], output_format: str, output: str | None) -> None: +@click.option( + "--backend", + default=None, + type=click.Choice(KNOWN_BACKENDS), + help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.", +) +def batch_analyze(reviews: tuple[str, ...], output_format: str, output: str | None, backend: str | None) -> None: """Analyze and aggregate multiple reviews. 
Combines multiple review outputs, deduplicates findings, diff --git a/greybeard/config.py b/greybeard/config.py index 30fce1d..6721e8b 100644 --- a/greybeard/config.py +++ b/greybeard/config.py @@ -17,7 +17,7 @@ PACK_CACHE_DIR = CONFIG_DIR / "packs" # Backend names we know about -KNOWN_BACKENDS = ["openai", "anthropic", "ollama", "lmstudio"] +KNOWN_BACKENDS = ["openai", "anthropic", "ollama", "lmstudio", "copilot", "groq"] # Default models per backend. # Anthropic default is claude-haiku-4-5-20251001 (not Sonnet) — a deliberate @@ -31,6 +31,8 @@ "anthropic": "claude-haiku-4-5-20251001", "ollama": "llama3.2", "lmstudio": "local-model", + "copilot": "gpt-4-turbo", # Copilot provides access to Copilot-managed models + "groq": "llama-3.1-8b-instant", # Fast and cheap, good for simple tasks } # Default base URLs for local/alternate backends @@ -45,6 +47,8 @@ "anthropic": "ANTHROPIC_API_KEY", "ollama": "", # no key needed "lmstudio": "", # no key needed + "copilot": "GITHUB_TOKEN", # GitHub Personal Access Token or GitHub CLI token + "groq": "GROQ_API_KEY", # Groq API key } diff --git a/tests/test_copilot_backend.py b/tests/test_copilot_backend.py new file mode 100644 index 0000000..33b1cd4 --- /dev/null +++ b/tests/test_copilot_backend.py @@ -0,0 +1,471 @@ +"""Tests for the GitHub Copilot LLM backend.""" + +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from greybeard.backends.copilot import CopilotBackend, get_copilot_backend +from greybeard.config import GreybeardConfig, LLMConfig + + +class TestCopilotBackend: + """Test CopilotBackend class initialization and properties.""" + + def test_init_with_token(self): + """Test initialization with explicit token.""" + backend = CopilotBackend(github_token="test-token-123") + assert backend.token == "test-token-123" + assert backend.api_key == "test-token-123" + + def test_init_with_env_var(self, monkeypatch): + """Test initialization from environment variable.""" + 
monkeypatch.setenv("GITHUB_TOKEN", "env-token-456") + backend = CopilotBackend() + assert backend.token == "env-token-456" + assert backend.api_key == "env-token-456" + + def test_init_missing_token_raises(self, monkeypatch): + """Test that missing token raises ValueError.""" + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + with pytest.raises(ValueError, match="GitHub token required"): + CopilotBackend() + + def test_base_url_property(self): + """Test base_url property.""" + assert CopilotBackend.base_url == "https://api.githubcopilot.com/v1" + + def test_api_key_env_property(self): + """Test api_key_env property.""" + assert CopilotBackend.api_key_env == "GITHUB_TOKEN" + + def test_get_base_url_static_method(self): + """Test static get_base_url method.""" + url = CopilotBackend.get_base_url() + assert url == "https://api.githubcopilot.com/v1" + + def test_get_api_key_env_var_static_method(self): + """Test static get_api_key_env_var method.""" + env_var = CopilotBackend.get_api_key_env_var() + assert env_var == "GITHUB_TOKEN" + + +class TestGetCopilotBackendFactory: + """Test get_copilot_backend factory function.""" + + def test_factory_with_env_var(self, monkeypatch): + """Test factory function with environment variable.""" + monkeypatch.setenv("GITHUB_TOKEN", "factory-token-789") + config = LLMConfig(backend="copilot") + backend = get_copilot_backend(config) + + assert isinstance(backend, CopilotBackend) + assert backend.api_key == "factory-token-789" + + def test_factory_with_custom_env_var(self, monkeypatch): + """Test factory with custom environment variable.""" + monkeypatch.setenv("CUSTOM_GITHUB_TOKEN", "custom-token-999") + config = LLMConfig(backend="copilot", api_key_env="CUSTOM_GITHUB_TOKEN") + backend = get_copilot_backend(config) + + assert backend.api_key == "custom-token-999" + + def test_factory_missing_token_raises(self, monkeypatch): + """Test factory raises when token is missing.""" + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + 
config = LLMConfig(backend="copilot") + + with pytest.raises(ValueError, match="GitHub token required"): + get_copilot_backend(config) + + +class TestCopilotConfigIntegration: + """Test Copilot backend integration with GreybeardConfig.""" + + def test_copilot_in_known_backends(self): + """Test that copilot is in KNOWN_BACKENDS.""" + from greybeard.config import KNOWN_BACKENDS + + assert "copilot" in KNOWN_BACKENDS + + def test_copilot_default_model(self): + """Test default model for copilot backend.""" + from greybeard.config import DEFAULT_MODELS + + assert "copilot" in DEFAULT_MODELS + assert DEFAULT_MODELS["copilot"] == "gpt-4-turbo" + + def test_copilot_api_key_env(self): + """Test API key environment variable for copilot.""" + from greybeard.config import DEFAULT_API_KEY_ENVS + + assert "copilot" in DEFAULT_API_KEY_ENVS + assert DEFAULT_API_KEY_ENVS["copilot"] == "GITHUB_TOKEN" + + def test_copilot_config_resolved_model(self): + """Test resolved model for copilot config.""" + config = LLMConfig(backend="copilot") + assert config.resolved_model() == "gpt-4-turbo" + + def test_copilot_config_base_url(self): + """Test base URL resolution for copilot.""" + config = LLMConfig(backend="copilot") + resolved_url = config.resolved_base_url() + assert resolved_url is None # copilot not in DEFAULT_BASE_URLS + + def test_copilot_config_api_key(self, monkeypatch): + """Test API key resolution for copilot config.""" + monkeypatch.setenv("GITHUB_TOKEN", "test-github-token") + config = LLMConfig(backend="copilot") + assert config.resolved_api_key() == "test-github-token" + + def test_copilot_config_api_key_env_var(self): + """Test API key environment variable name resolution.""" + config = LLMConfig(backend="copilot") + assert config.resolved_api_key_env() == "GITHUB_TOKEN" + + def test_greybeard_config_load_with_copilot(self, tmp_path, monkeypatch): + """Test loading GreybeardConfig with copilot backend.""" + monkeypatch.setenv("GITHUB_TOKEN", "test-token-xyz") + config_dict 
= { + "llm": { + "backend": "copilot", + "model": "gpt-4-turbo", + } + } + cfg = GreybeardConfig.from_dict(config_dict) + + assert cfg.llm.backend == "copilot" + assert cfg.llm.resolved_model() == "gpt-4-turbo" + assert cfg.llm.resolved_api_key() == "test-token-xyz" + + +class TestCopilotRunReview: + """Test _run_copilot function in analyzer.py.""" + + def test_run_copilot_non_streaming(self, monkeypatch): + """Test _run_copilot in non-streaming mode.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + # Mock OpenAI client + with patch("openai.OpenAI") as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + # Mock response + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "## Review Result\n\nLooks good!" + mock_response.usage.prompt_tokens = 150 + mock_response.usage.completion_tokens = 75 + mock_client.chat.completions.create.return_value = mock_response + + llm = LLMConfig(backend="copilot") + text, input_tokens, output_tokens = _run_copilot( + llm, + "gpt-4-turbo", + "system prompt", + "user message", + stream=False, + ) + + assert text == "## Review Result\n\nLooks good!" 
+ assert input_tokens == 150 + assert output_tokens == 75 + + # Verify the client was initialized correctly + mock_openai_class.assert_called_once() + call_kwargs = mock_openai_class.call_args[1] + assert call_kwargs["api_key"] == "test-token" + assert call_kwargs["base_url"] == "https://api.githubcopilot.com/v1" + + def test_run_copilot_streaming(self, monkeypatch, capsys): + """Test _run_copilot in streaming mode.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + with patch("openai.OpenAI") as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + # Mock streaming response + mock_chunk1 = MagicMock() + mock_chunk1.choices = [MagicMock()] + mock_chunk1.choices[0].delta.content = "Hello" + + mock_chunk2 = MagicMock() + mock_chunk2.choices = [MagicMock()] + mock_chunk2.choices[0].delta.content = " world" + + mock_stream = iter([mock_chunk1, mock_chunk2]) + mock_client.chat.completions.create.return_value = mock_stream + + llm = LLMConfig(backend="copilot") + text, input_tokens, output_tokens = _run_copilot( + llm, + "gpt-4-turbo", + "system prompt here", + "user message here", + stream=True, + ) + + assert text == "Hello world" + assert input_tokens > 0 + assert output_tokens > 0 + + def test_run_copilot_missing_api_key(self, monkeypatch): + """Test _run_copilot raises SystemExit when token is missing.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + + llm = LLMConfig(backend="copilot") + + with pytest.raises(SystemExit): + _run_copilot(llm, "gpt-4-turbo", "system", "user", stream=False) + + def test_run_copilot_uses_correct_base_url(self, monkeypatch): + """Test that _run_copilot uses the correct GitHub Copilot API base URL.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + with patch("openai.OpenAI") as mock_openai_class: + mock_client = MagicMock() + 
mock_openai_class.return_value = mock_client + + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "response" + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 5 + mock_client.chat.completions.create.return_value = mock_response + + llm = LLMConfig(backend="copilot") + _run_copilot(llm, "gpt-4-turbo", "system", "user", stream=False) + + # Verify base_url is set correctly + call_kwargs = mock_openai_class.call_args[1] + assert call_kwargs["base_url"] == "https://api.githubcopilot.com/v1" + + +class TestCopilotRouting: + """Test that Copilot backend is properly routed in run_review.""" + + def test_run_review_routes_to_copilot(self, monkeypatch): + """Test that run_review routes to _run_copilot for copilot backend.""" + from greybeard.analyzer import run_review + from greybeard.models import ContentPack, ReviewRequest + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + pack = ContentPack(name="test", perspective="Tester", tone="calm") + request = ReviewRequest(mode="review", pack=pack, input_text="test diff") + + config = GreybeardConfig() + config.llm.backend = "copilot" + + with patch("greybeard.analyzer._run_copilot") as mock_copilot: + mock_copilot.return_value = ("response", 100, 50) + result = run_review(request, config=config, stream=False) + + mock_copilot.assert_called_once() + assert result == "response" + + def test_run_review_async_with_copilot(self, monkeypatch): + """Test async run_review with copilot backend.""" + import asyncio + + from greybeard.analyzer import run_review_async + from greybeard.models import ContentPack, ReviewRequest + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + pack = ContentPack(name="test", perspective="Tester", tone="calm") + request = ReviewRequest(mode="review", pack=pack, input_text="test diff") + + config = GreybeardConfig() + config.llm.backend = "copilot" + + with patch("greybeard.analyzer._run_copilot") as 
mock_copilot: + mock_copilot.return_value = ("async response", 100, 50) + + result = asyncio.run(run_review_async(request, config=config, stream=False)) + + mock_copilot.assert_called_once() + assert result == "async response" + + +class TestCopilotErrorHandling: + """Test error handling for Copilot backend.""" + + def test_copilot_with_custom_api_key_env(self, monkeypatch): + """Test Copilot with custom API key environment variable.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("MY_GH_TOKEN", "custom-token-123") + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + + with patch("openai.OpenAI") as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "result" + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 5 + mock_client.chat.completions.create.return_value = mock_response + + llm = LLMConfig(backend="copilot", api_key_env="MY_GH_TOKEN") + text, _, _ = _run_copilot(llm, "gpt-4-turbo", "system", "user", stream=False) + + assert text == "result" + call_kwargs = mock_openai_class.call_args[1] + assert call_kwargs["api_key"] == "custom-token-123" + + def test_copilot_with_no_usage_info(self, monkeypatch): + """Test Copilot handles response without usage info gracefully.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + with patch("openai.OpenAI") as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "result" + mock_response.usage = None # No usage info + mock_client.chat.completions.create.return_value = mock_response + + llm = LLMConfig(backend="copilot") + text, input_tokens, output_tokens = _run_copilot( + llm, "gpt-4-turbo", "system", 
"user", stream=False + ) + + assert text == "result" + assert input_tokens == 0 # Defaults to 0 when no usage + assert output_tokens == 0 + + +class TestCopilotModelSelection: + """Test model selection for Copilot backend.""" + + def test_copilot_with_custom_model(self, monkeypatch): + """Test Copilot with custom model override.""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + with patch("openai.OpenAI") as mock_openai_class: + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + mock_response = MagicMock() + mock_response.choices = [MagicMock()] + mock_response.choices[0].message.content = "result" + mock_response.usage.prompt_tokens = 10 + mock_response.usage.completion_tokens = 5 + mock_client.chat.completions.create.return_value = mock_response + + llm = LLMConfig(backend="copilot") + _run_copilot(llm, "gpt-4o", "system", "user", stream=False) + + # Verify custom model was passed + call_args = mock_client.chat.completions.create.call_args + assert call_args[1]["model"] == "gpt-4o" + + def test_copilot_default_model_from_config(self): + """Test Copilot uses default model from config.""" + config = LLMConfig(backend="copilot", model="") + assert config.resolved_model() == "gpt-4-turbo" + + def test_copilot_custom_model_from_config(self): + """Test Copilot can override default model.""" + config = LLMConfig(backend="copilot", model="gpt-4o") + assert config.resolved_model() == "gpt-4o" + + +class TestCopilotAPIErrorHandling: + """Test error handling for GitHub Copilot API errors.""" + + def test_copilot_404_error_handling(self, monkeypatch): + """Test Copilot handles 404 errors (endpoint not found).""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + with patch("openai.OpenAI") as mock_openai_class: + from openai import APIStatusError + + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + # Mock 404 error 
+ error_response = MagicMock() + error_response.status_code = 404 + mock_client.chat.completions.create.side_effect = APIStatusError( + "Not Found", + response=error_response, + body={"error": "Not found"}, + ) + + llm = LLMConfig(backend="copilot") + with pytest.raises(SystemExit): + _run_copilot(llm, "gpt-4-turbo", "system", "user", stream=False) + + def test_copilot_401_error_handling(self, monkeypatch): + """Test Copilot handles 401 errors (authentication failed).""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "invalid-token") + + with patch("openai.OpenAI") as mock_openai_class: + from openai import APIStatusError + + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + # Mock 401 error + error_response = MagicMock() + error_response.status_code = 401 + mock_client.chat.completions.create.side_effect = APIStatusError( + "Unauthorized", + response=error_response, + body={"error": "Invalid token"}, + ) + + llm = LLMConfig(backend="copilot") + with pytest.raises(SystemExit): + _run_copilot(llm, "gpt-4-turbo", "system", "user", stream=False) + + def test_copilot_403_error_handling(self, monkeypatch): + """Test Copilot handles 403 errors (access forbidden).""" + from greybeard.analyzer import _run_copilot + + monkeypatch.setenv("GITHUB_TOKEN", "test-token") + + with patch("openai.OpenAI") as mock_openai_class: + from openai import APIStatusError + + mock_client = MagicMock() + mock_openai_class.return_value = mock_client + + # Mock 403 error + error_response = MagicMock() + error_response.status_code = 403 + mock_client.chat.completions.create.side_effect = APIStatusError( + "Forbidden", + response=error_response, + body={"error": "No Copilot access"}, + ) + + llm = LLMConfig(backend="copilot") + with pytest.raises(SystemExit): + _run_copilot(llm, "gpt-4-turbo", "system", "user", stream=False)