Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,14 @@ node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
dist/

# Environment and runtime data (passed or mounted at run time, not needed in image)
.env
.env.*
!.env.example
audit-logs/
repos/

# Runtime directories
sessions/
Expand Down
14 changes: 14 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,22 @@ ANTHROPIC_API_KEY=your-api-key-here
# OPENROUTER_API_KEY=sk-or-your-openrouter-key
# ROUTER_DEFAULT=openrouter,google/gemini-3-flash-preview

# =============================================================================
# OPTION 3: OpenAI-Compatible (local models, minimax, ollama, vLLM, etc.)
# =============================================================================
# Native support - no router. Use with: ./shannon start ... PROVIDER=openai
# Or set AI_PROVIDER=openai in .env

# AI_PROVIDER=openai
# AI_BASE_URL=http://localhost:8080/v1
# AI_MODEL=minimax
# AI_API_KEY=your-key-or-empty-for-local
# AI_REQUEST_TIMEOUT_MS=120000 # 2 min default; increase for slow local models
# AI_MAX_CONCURRENT_REQUESTS=2 # Max simultaneous completions API calls (for rate-limiting local models)

# =============================================================================
# Available Models
# =============================================================================
# OpenAI: gpt-5.2, gpt-5-mini
# OpenRouter: google/gemini-3-flash-preview
# Local: minimax, ollama models, vLLM, etc. (via OPTION 3)
19 changes: 19 additions & 0 deletions CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -296,6 +296,25 @@ ROUTER_DEFAULT=openrouter,google/gemini-3-flash-preview

**Note:** Shannon is optimized for Anthropic's Claude models. Alternative providers are useful for cost savings during development but may produce varying results.

### Native OpenAI-Compatible Provider

Shannon supports a **native** OpenAI-compatible API path (no router) for local models and services such as minimax, ollama, and vLLM. This bypasses the Claude Agent SDK entirely and uses direct API calls.

**Enable OpenAI provider:**
```bash
./shannon start URL=<url> REPO=<name> PROVIDER=openai AI_BASE_URL=http://localhost:8080/v1 AI_MODEL=minimax AI_API_KEY=optional
```

**Configuration (in .env):**
```bash
AI_PROVIDER=openai
AI_BASE_URL=http://localhost:8080/v1
AI_MODEL=minimax
AI_API_KEY=your-key-or-empty-for-local
```

**Benefits over router mode:** Direct API integration, no translation layer, works with any OpenAI-compatible endpoint. Supports built-in tools (bash, file read/write, search) and MCP (Shannon helper, Playwright).

## Troubleshooting

### Common Issues
Expand Down
3 changes: 3 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,9 @@ WORKDIR /app
COPY package*.json ./
COPY mcp-server/package*.json ./mcp-server/

# The openai package requires zod ^3 while @anthropic-ai/claude-agent-sdk requires zod ^4 — use legacy peer deps for all npm operations
ENV npm_config_legacy_peer_deps=true

# Install Node.js dependencies (including devDependencies for TypeScript build)
RUN npm ci && \
cd mcp-server && npm ci && cd .. && \
Expand Down
7 changes: 7 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,13 @@ services:
- ANTHROPIC_AUTH_TOKEN=${ANTHROPIC_AUTH_TOKEN:-} # Auth token for router
- ROUTER_DEFAULT=${ROUTER_DEFAULT:-} # Model name when using router (e.g., "gemini,gemini-2.5-pro")
- CLAUDE_CODE_OAUTH_TOKEN=${CLAUDE_CODE_OAUTH_TOKEN:-}
- CLAUDE_CODE_MAX_OUTPUT_TOKENS=${CLAUDE_CODE_MAX_OUTPUT_TOKENS:-64000}
- AI_PROVIDER=${AI_PROVIDER:-claude}
- AI_BASE_URL=${AI_BASE_URL:-}
- AI_MODEL=${AI_MODEL:-}
- AI_API_KEY=${AI_API_KEY:-}
- AI_REQUEST_TIMEOUT_MS=${AI_REQUEST_TIMEOUT_MS:-120000}
- AI_MAX_CONCURRENT_REQUESTS=${AI_MAX_CONCURRENT_REQUESTS:-2}
depends_on:
temporal:
condition: service_healthy
Expand Down
4 changes: 2 additions & 2 deletions mcp-server/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ export function createShannonHelperServer(targetDir: string): ReturnType<typeof
}

// Re-export tool factories AND their raw handlers. The raw handlers
// (createSaveDeliverableHandler, generateTotp) let the OpenAI executor call
// the tool logic directly, bypassing the MCP protocol layer entirely.
export { createSaveDeliverableTool, createSaveDeliverableHandler } from './tools/save-deliverable.js';
export { generateTotpTool, generateTotp } from './tools/generate-totp.js';

// Export types for external use
export * from './types/index.js';
2 changes: 1 addition & 1 deletion mcp-server/src/tools/generate-totp.ts
Original file line number Diff line number Diff line change
Expand Up @@ -124,5 +124,5 @@ export const generateTotpTool = tool(
'generate_totp',
'Generates 6-digit TOTP code for authentication. Secret must be base32-encoded.',
GenerateTotpInputSchema.shape,
generateTotp
generateTotp as unknown as (args: { secret: string }, extra?: unknown) => Promise<{ content: Array<{ type: 'text'; text: string }>; [key: string]: unknown }>
);
6 changes: 4 additions & 2 deletions mcp-server/src/tools/save-deliverable.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,9 @@ export type SaveDeliverableInput = z.infer<typeof SaveDeliverableInputSchema>;
*
* This factory pattern ensures each MCP server instance has its own targetDir,
* preventing race conditions when multiple workflows run in parallel.
* Exported for direct use by OpenAI executor (bypasses MCP protocol).
*/
function createSaveDeliverableHandler(targetDir: string) {
export function createSaveDeliverableHandler(targetDir: string) {
return async function saveDeliverable(args: SaveDeliverableInput): Promise<ToolResult> {
try {
const { deliverable_type, content } = args;
Expand Down Expand Up @@ -92,10 +93,11 @@ function createSaveDeliverableHandler(targetDir: string) {
* deliverables are saved to the correct workflow's directory.
*/
export function createSaveDeliverableTool(targetDir: string) {
const handler = createSaveDeliverableHandler(targetDir);
return tool(
'save_deliverable',
'Saves deliverable files with automatic validation. Queue files must have {"vulnerabilities": [...]} structure.',
SaveDeliverableInputSchema.shape,
createSaveDeliverableHandler(targetDir)
handler as unknown as (args: SaveDeliverableInput, extra?: unknown) => Promise<{ content: Array<{ type: 'text'; text: string }>; [key: string]: unknown }>
);
}
2 changes: 1 addition & 1 deletion mcp-server/src/types/tool-responses.ts
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ export type ToolResponse =
| GenerateTotpResponse;

export interface ToolResultContent {
  // Narrowed from `string` to the literal 'text': every tool in this server
  // emits text-only content, and the literal lets callers discriminate
  // without a runtime check.
  type: 'text';
  text: string;
}

Expand Down
Loading