Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 7 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -334,12 +334,13 @@ See [Packs Guide](docs/guides/packs.md) for detailed pack creation and best prac

greybeard works with any LLM backend. Configure once with `greybeard init`:

| Backend | How | What You Need |
| ----------- | ------------- | -------------------------------------------------- |
| `openai` | OpenAI API | `OPENAI_API_KEY` |
| `anthropic` | Anthropic API | `ANTHROPIC_API_KEY` + `greybeard[anthropic]` extra |
| `ollama` | Local (free) | [Ollama](https://ollama.ai) running locally |
| `lmstudio` | Local (free) | [LM Studio](https://lmstudio.ai) server running |
| Backend | How | What You Need |
| ----------- | --------------------- | -------------------------------------------------- |
| `openai` | OpenAI API | `OPENAI_API_KEY` |
| `anthropic` | Anthropic API | `ANTHROPIC_API_KEY` + `greybeard[anthropic]` extra |
| `copilot` | GitHub Copilot API | `GITHUB_TOKEN` (GitHub PAT or CLI token) |
| `ollama` | Local (free) | [Ollama](https://ollama.ai) running locally |
| `lmstudio` | Local (free) | [LM Studio](https://lmstudio.ai) server running |

### Configure Your Backend

Expand Down
42 changes: 42 additions & 0 deletions docs/guides/backends.md
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,48 @@ greybeard config set llm.model claude-3-opus-20240229 # most capable

---

## GitHub Copilot

Uses GitHub Copilot's API endpoint at `api.githubcopilot.com/v1`. Requires a GitHub Personal Access Token or GitHub CLI authentication.

**Setup:**

1. Create a Personal Access Token at [github.com/settings/tokens](https://github.com/settings/tokens) with at least `copilot` scope, OR authenticate with GitHub CLI:
```bash
gh auth login
```

2. Configure greybeard:
```bash
export GITHUB_TOKEN=ghp_... # Your GitHub token
greybeard config set llm.backend copilot
```

3. Verify the configuration:
```bash
greybeard config
```

**Default model:** `gpt-4-turbo`

**Other models:** Depends on what models are available through your Copilot subscription:
```bash
greybeard config set llm.model gpt-4o # If available
greybeard config set llm.model gpt-4 # Older model
```

**Custom GitHub token env var:**
If you want to use a different environment variable (e.g., for CI/CD):
```bash
export MY_GITHUB_TOKEN=ghp_...
greybeard config set llm.api_key_env MY_GITHUB_TOKEN
```

!!! note
GitHub Copilot API access requires an active Copilot subscription and a GitHub Personal Access Token with appropriate permissions. Check [GitHub's Copilot documentation](https://docs.github.com/en/copilot/about-github-copilot) for current availability.

---

## Ollama (local, free)

Run any open-source model locally — no API key, no cost, fully offline.
Expand Down
68 changes: 49 additions & 19 deletions greybeard/analyzer.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ def _run_copilot(
) -> tuple[str, int, int]:
"""Run via GitHub Copilot API (OpenAI-compatible endpoint)."""
try:
from openai import OpenAI
from openai import OpenAI, APIStatusError
except ImportError:
print("Error: openai package not installed. Run: uv pip install openai", file=sys.stderr)
sys.exit(1)
Expand All @@ -335,24 +335,54 @@ def _run_copilot(
{"role": "user", "content": user_message},
]

if stream:
text = _stream_openai(client, model, messages)
input_tokens = len(system_prompt.split()) + len(user_message.split())
output_tokens = len(text.split())
return text, input_tokens, output_tokens
else:
resp = client.chat.completions.create(
model=model,
messages=messages, # type: ignore[arg-type]
stream=False,
)
text = resp.choices[0].message.content or "" # type: ignore[union-attr]
usage = resp.usage # type: ignore[union-attr]
return (
text,
(usage.prompt_tokens if usage else 0),
(usage.completion_tokens if usage else 0),
)
try:
if stream:
text = _stream_openai(client, model, messages)
input_tokens = len(system_prompt.split()) + len(user_message.split())
output_tokens = len(text.split())
return text, input_tokens, output_tokens
else:
resp = client.chat.completions.create(
model=model,
messages=messages, # type: ignore[arg-type]
stream=False,
)
text = resp.choices[0].message.content or "" # type: ignore[union-attr]
usage = resp.usage # type: ignore[union-attr]
return (
text,
(usage.prompt_tokens if usage else 0),
(usage.completion_tokens if usage else 0),
)
except APIStatusError as e:
if e.status_code == 404:
print(
f"Error: GitHub Copilot API endpoint not found (404).\n"
f"Check that the base URL is correct: {base_url}\n"
f"Ensure your GitHub token is valid and has access to Copilot.",
file=sys.stderr,
)
elif e.status_code == 401:
print(
f"Error: GitHub Copilot authentication failed (401).\n"
f"Your GitHub token is invalid or expired.\n"
f"Please update your {llm.resolved_api_key_env()} token.",
file=sys.stderr,
)
elif e.status_code == 403:
print(
f"Error: GitHub Copilot access forbidden (403).\n"
f"Your GitHub account may not have Copilot access.\n"
f"Check your GitHub subscription and Copilot availability.",
file=sys.stderr,
)
else:
print(
f"Error: GitHub Copilot API error ({e.status_code}).\n"
f"Details: {e.message}",
file=sys.stderr,
)
sys.exit(1)


def _stream_openai(client: object, model: str, messages: list[dict[str, str]]) -> str:
Expand Down
81 changes: 81 additions & 0 deletions greybeard/backends/copilot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
"""GitHub Copilot LLM backend implementation.

Routes to api.githubcopilot.com/v1 using OpenAI-compatible API.
Requires GitHub authentication token (PAT or GitHub CLI token).
"""

from __future__ import annotations

import os
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from greybeard.config import LLMConfig


class CopilotBackend:
    """OpenAI-compatible LLM backend backed by GitHub Copilot.

    GitHub Copilot exposes an OpenAI-compatible endpoint at
    api.githubcopilot.com/v1. Callers authenticate with a GitHub token —
    either a personal access token or a GitHub CLI token.

    Attributes:
        base_url: URL of the Copilot API endpoint.
        api_key_env: Name of the environment variable holding the token.
    """

    base_url = "https://api.githubcopilot.com/v1"
    api_key_env = "GITHUB_TOKEN"

    def __init__(self, github_token: str | None = None) -> None:
        """Create the backend, resolving the GitHub token.

        Args:
            github_token: Explicit token. When omitted, falls back to the
                GITHUB_TOKEN environment variable.

        Raises:
            ValueError: When no token can be resolved from either source.
        """
        resolved = github_token if github_token else os.getenv(self.api_key_env)
        if not resolved:
            raise ValueError(
                f"GitHub token required. Set {self.api_key_env} env var "
                f"or pass github_token."
            )
        self.token: str = resolved

    @property
    def api_key(self) -> str:
        """The GitHub token, exposed under the generic API-key name."""
        return self.token

    @staticmethod
    def get_api_key_env_var() -> str:
        """Name of the environment variable consulted for the token."""
        return CopilotBackend.api_key_env

    @staticmethod
    def get_base_url() -> str:
        """The Copilot API base URL."""
        return CopilotBackend.base_url


def get_copilot_backend(config: LLMConfig) -> CopilotBackend:
    """Build a :class:`CopilotBackend` from an ``LLMConfig``.

    Honors ``config.api_key_env`` as an override for which environment
    variable holds the GitHub token; otherwise falls back to the backend's
    default (``GITHUB_TOKEN``).

    Args:
        config: LLMConfig, optionally carrying an api_key_env override.

    Returns:
        A ready-to-use CopilotBackend instance.

    Raises:
        ValueError: If the chosen environment variable yields no token.
    """
    env_name = config.api_key_env or CopilotBackend.api_key_env
    token = os.getenv(env_name)
    if token:
        return CopilotBackend(github_token=token)
    raise ValueError(
        f"GitHub token required. Set {env_name} env var or configure it in greybeard."
    )
49 changes: 44 additions & 5 deletions greybeard/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,12 @@ def cli() -> None:
)
@click.option("--context", "-c", default="", help="Additional context notes.")
@click.option("--model", default=None, help="Override LLM model.")
@click.option(
"--backend",
default=None,
type=click.Choice(KNOWN_BACKENDS),
help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.",
)
@click.option(
"--audience",
"-a",
Expand Down Expand Up @@ -196,7 +202,7 @@ def cli() -> None:
help="Start interactive REPL after initial analysis.",
)
def analyze(
mode, pack, repo, context, model, audience, output, fmt, save_decision_name, interactive
mode, pack, repo, context, model, backend, audience, output, fmt, save_decision_name, interactive
) -> None:
r"""Analyze a decision, diff, or document.

Expand All @@ -214,6 +220,11 @@ def analyze(
git diff main | greybeard analyze --interactive --mode mentor
"""
cfg = GreybeardConfig.load()

# Override backend if provided via --backend flag
if backend:
cfg.llm.backend = backend

mode = mode or cfg.default_mode
pack_name = pack or cfg.default_pack

Expand Down Expand Up @@ -285,6 +296,12 @@ def analyze(
)
@click.option("--pack", "-p", default=None, help="Content pack name or path.")
@click.option("--model", default=None, help="Override LLM model.")
@click.option(
"--backend",
default=None,
type=click.Choice(KNOWN_BACKENDS),
help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.",
)
@click.option("--output", "-o", default=None, help="Save review to a file.")
@click.option(
"--format",
Expand All @@ -295,7 +312,7 @@ def analyze(
show_default=True,
help="Output format.",
)
def self_check(context, pack, model, output, fmt) -> None:
def self_check(context, pack, model, backend, output, fmt) -> None:
r"""Review your own decision before sharing it.

\b
Expand All @@ -304,6 +321,11 @@ def self_check(context, pack, model, output, fmt) -> None:
greybeard self-check --context "migration plan" --format json --output check.json
"""
cfg = GreybeardConfig.load()

# Override backend if provided via --backend flag
if backend:
cfg.llm.backend = backend

pack_name = pack or cfg.default_pack

try:
Expand Down Expand Up @@ -354,6 +376,12 @@ def self_check(context, pack, model, output, fmt) -> None:
"--pack", "-p", default="mentor-mode", show_default=True, help="Content pack name or path."
)
@click.option("--model", default=None, help="Override LLM model.")
@click.option(
"--backend",
default=None,
type=click.Choice(KNOWN_BACKENDS),
help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.",
)
@click.option("--output", "-o", default=None, help="Save to a file.")
@click.option(
"--format",
Expand All @@ -370,7 +398,7 @@ def self_check(context, pack, model, output, fmt) -> None:
is_flag=True,
help="Start interactive REPL after initial analysis.",
)
def coach(audience, context, pack, model, output, fmt, interactive) -> None:
def coach(audience, context, pack, model, backend, output, fmt, interactive) -> None:
r"""Get help communicating a concern or decision constructively.

\b
Expand All @@ -381,6 +409,11 @@ def coach(audience, context, pack, model, output, fmt, interactive) -> None:
greybeard coach --audience team --context "shipping too fast" --interactive
"""
cfg = GreybeardConfig.load()

# Override backend if provided via --backend flag
if backend:
cfg.llm.backend = backend

try:
content_pack = load_pack(pack)
except FileNotFoundError as e:
Expand Down Expand Up @@ -604,7 +637,7 @@ def config_set(key: str, value: str) -> None:

\b
Keys:
llm.backend openai | anthropic | ollama | lmstudio
llm.backend openai | anthropic | copilot | groq | ollama | lmstudio
llm.model e.g. gpt-4o, claude-3-5-sonnet-20241022, llama3.2
llm.base_url e.g. http://localhost:11434/v1
llm.api_key_env e.g. OPENAI_API_KEY
Expand Down Expand Up @@ -1140,7 +1173,13 @@ def adr_list(repo) -> None:
default=None,
help="Output file path (default: batch-analysis.{format}).",
)
def batch_analyze(reviews: tuple[str, ...], output_format: str, output: str | None) -> None:
@click.option(
"--backend",
default=None,
type=click.Choice(KNOWN_BACKENDS),
help="LLM backend: openai, anthropic, copilot, groq, ollama, lmstudio.",
)
def batch_analyze(reviews: tuple[str, ...], output_format: str, output: str | None, backend: str | None) -> None:
"""Analyze and aggregate multiple reviews.

Combines multiple review outputs, deduplicates findings,
Expand Down
6 changes: 5 additions & 1 deletion greybeard/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
PACK_CACHE_DIR = CONFIG_DIR / "packs"

# Backend names we know about
KNOWN_BACKENDS = ["openai", "anthropic", "ollama", "lmstudio"]
KNOWN_BACKENDS = ["openai", "anthropic", "ollama", "lmstudio", "copilot", "groq"]

# Default models per backend.
# Anthropic default is claude-haiku-4-5-20251001 (not Sonnet) — a deliberate
Expand All @@ -31,6 +31,8 @@
"anthropic": "claude-haiku-4-5-20251001",
"ollama": "llama3.2",
"lmstudio": "local-model",
"copilot": "gpt-4-turbo", # Copilot provides access to Copilot-managed models
"groq": "llama-3.1-8b-instant", # Fast and cheap, good for simple tasks
}

# Default base URLs for local/alternate backends
Expand All @@ -45,6 +47,8 @@
"anthropic": "ANTHROPIC_API_KEY",
"ollama": "", # no key needed
"lmstudio": "", # no key needed
"copilot": "GITHUB_TOKEN", # GitHub Personal Access Token or GitHub CLI token
"groq": "GROQ_API_KEY", # Groq API key
}


Expand Down
Loading
Loading