Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 53 additions & 0 deletions backend/.env.production.template
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# ==========================================
# CodeGuard AI - Production Environment Template
# Copy to .env.production and fill in real values
# ==========================================

# ===== APPLICATION =====
APP_NAME=CodeGuard AI
APP_VERSION=1.0.0
DEBUG=False
ENVIRONMENT=production

# ===== CLOUD RUN =====
# PORT is set automatically by Cloud Run
# PORT=8080

# ===== DATABASE (Supabase/Cloud SQL) =====
# Use your production Supabase connection string
DATABASE_URL=postgresql://postgres:PASSWORD@db.PROJECT.supabase.co:5432/postgres

# ===== REDIS (Optional - Cloud Memorystore) =====
# REDIS_URL=redis://MEMORYSTORE_IP:6379/0

# ===== AUTHENTICATION (Clerk - Production) =====
CLERK_SECRET_KEY=sk_live_xxxxxxxxxxxx
CLERK_PUBLISHABLE_KEY=pk_live_xxxxxxxxxxxx
CLERK_JWKS_URL=https://YOUR_CLERK_DOMAIN/.well-known/jwks.json
CLERK_JWT_SIGNING_KEY=your-jwt-signing-key

# ===== AI - VERTEX AI =====
# GCP Project with Vertex AI enabled
GCP_PROJECT_ID=your-gcp-project-id
GCP_LOCATION=us-central1

# Path to service account JSON (for local testing)
# In Cloud Run, use Workload Identity or mounted secrets
GOOGLE_APPLICATION_CREDENTIALS=/secrets/gcp-sa.json

# AI Settings
AI_ENABLED=true
AI_MODEL_DEV=gemini-2.0-flash-lite-001
AI_MODEL_PROD=gemini-2.5-flash-lite
AI_TEMPERATURE=0.3
AI_MAX_OUTPUT_TOKENS=2048
AI_RATE_LIMIT_PER_HOUR=50

# ===== CORS - VERCEL FRONTEND =====
# Add your production Vercel domain
# Wildcard entries (e.g. https://*.vercel.app) are matched via allow_origin_regex in main.py, not literally by allow_origins
ALLOWED_ORIGINS=https://codeguard-unal.vercel.app,https://your-project.vercel.app

# ===== LOGGING =====
LOG_LEVEL=INFO
LOG_FORMAT=json
30 changes: 15 additions & 15 deletions backend/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# ==========================================
# CodeGuard AI - Backend Dockerfile
# Python 3.11 + FastAPI
# Optimized for Google Cloud Run
# ==========================================

FROM python:3.11-slim
Expand All @@ -9,21 +10,23 @@ FROM python:3.11-slim
LABEL maintainer="CodeGuard AI Team <team@codeguard.com>"
LABEL description="Multi-Agent Code Review System - Backend API"

# Environment variables
# Environment variables for Python optimization
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1
PIP_DISABLE_PIP_VERSION_CHECK=1 \
# Cloud Run uses PORT env variable
PORT=8080

# Set working directory
WORKDIR /app

# Install system dependencies
# Install system dependencies (minimal for Cloud Run)
RUN apt-get update && apt-get install -y --no-install-recommends \
gcc \
postgresql-client \
curl \
&& rm -rf /var/lib/apt/lists/*
libpq-dev \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean

# Copy requirements first (for layer caching)
COPY requirements.txt .
Expand All @@ -34,16 +37,13 @@ RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .

# Create non-root user
# Create non-root user (Cloud Run best practice)
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

# Expose port
EXPOSE 8000
# EXPOSE is informational only; Cloud Run ignores it and routes traffic to $PORT
EXPOSE 8080

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8000/health || exit 1

# Run application
CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000"]
# Cloud Run handles health checks via HTTP, no HEALTHCHECK needed
# Use shell form to allow $PORT expansion at runtime
CMD uvicorn src.main:app --host 0.0.0.0 --port $PORT
82 changes: 82 additions & 0 deletions backend/docker-compose.prod.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# ==========================================
# CodeGuard AI - Production Docker Compose
# For Google Cloud Run deployment
# ==========================================
#
# NOTE: This file is for LOCAL TESTING of production config.
# Cloud Run doesn't use docker-compose - it uses the Dockerfile directly.
# Use this to test the production image locally before deploying.
#
# Usage:
# docker-compose -f docker-compose.prod.yml up --build
#
# ==========================================

version: '3.9'

services:
# ==========================================
# Backend API (Production-like)
# ==========================================
backend:
build:
context: .
dockerfile: Dockerfile
container_name: codeguard-backend-prod
ports:
- "8080:8080"
environment:
# Cloud Run uses PORT env var
PORT: "8080"

# Application
APP_NAME: "CodeGuard AI"
DEBUG: "False"
ENVIRONMENT: "production"

# Database (Cloud SQL or Supabase)
DATABASE_URL: "${DATABASE_URL}"

# Redis (Cloud Memorystore or external)
REDIS_URL: "${REDIS_URL:-}"

# Auth (Clerk - Production keys)
CLERK_SECRET_KEY: "${CLERK_SECRET_KEY}"
CLERK_PUBLISHABLE_KEY: "${CLERK_PUBLISHABLE_KEY}"
CLERK_JWKS_URL: "${CLERK_JWKS_URL}"
CLERK_JWT_SIGNING_KEY: "${CLERK_JWT_SIGNING_KEY}"

# AI - Vertex AI (GCP Service Account)
GCP_PROJECT_ID: "${GCP_PROJECT_ID}"
GCP_LOCATION: "${GCP_LOCATION:-us-central1}"
GOOGLE_APPLICATION_CREDENTIALS: "/secrets/gcp-sa.json"
AI_ENABLED: "${AI_ENABLED:-true}"
AI_MODEL_DEV: "${AI_MODEL_DEV:-gemini-2.0-flash-lite-001}"
AI_MODEL_PROD: "${AI_MODEL_PROD:-gemini-2.5-flash-lite}"
AI_RATE_LIMIT_PER_HOUR: "${AI_RATE_LIMIT_PER_HOUR:-10}"

# CORS - Vercel domains
ALLOWED_ORIGINS: "${ALLOWED_ORIGINS:-https://codeguard-unal.vercel.app,https://*.vercel.app}"

# Logging
LOG_LEVEL: "INFO"
LOG_FORMAT: "json"

# For local testing with service account file
volumes:
- ${GOOGLE_APPLICATION_CREDENTIALS:-./gcp-sa.json}:/secrets/gcp-sa.json:ro

networks:
- codeguard-prod

# Health check endpoint
healthcheck:
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s

networks:
codeguard-prod:
driver: bridge
9 changes: 6 additions & 3 deletions backend/src/core/config/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,13 @@ class Settings(BaseSettings):

# API
API_HOST: str = "0.0.0.0"
API_PORT: int = 8000
API_PORT: int = Field(default=8000, description="Port for API, Cloud Run uses PORT env var")

# CORS
ALLOWED_ORIGINS: str = "http://localhost:3000,http://localhost:5173"
# CORS - Support Vercel preview URLs with wildcard patterns
ALLOWED_ORIGINS: str = Field(
default="http://localhost:3000,http://localhost:5173",
description="Comma-separated list of allowed origins. Supports wildcards for Vercel.",
)

# Redis (opcional)
REDIS_URL: Optional[str] = None
Expand Down
33 changes: 31 additions & 2 deletions backend/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,34 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from src.core.config.settings import settings
from src.routers.analysis import router as analysis_router
from src.routers.auth import router as auth_router
from src.routers.findings import router as findings_router


def get_allowed_origins() -> list[str]:
    """
    Parse ALLOWED_ORIGINS into the exact-origin list for CORSMiddleware.

    Supports:
    - Exact URLs: https://codeguard-unal.vercel.app

    Wildcard entries (e.g. https://*.vercel.app) are NOT expanded here:
    Starlette's CORSMiddleware only matches ``allow_origins`` entries
    literally, so wildcard patterns are filtered out. Preview-deployment
    domains must instead be covered by the ``allow_origin_regex`` passed
    to the middleware.

    Returns:
        List of exact origins with any wildcard patterns removed.
    """
    origins = settings.allowed_origins_list

    # Keep only literal origins; a "*" anywhere in the entry marks a
    # wildcard pattern that CORSMiddleware cannot match exactly.
    return [origin for origin in origins if "*" not in origin]


# Create FastAPI app
app = FastAPI(
title="CodeGuard AI",
Expand All @@ -19,10 +43,15 @@
redoc_url="/redoc",
)

# CORS
# CORS Configuration for Vercel + Local Development
# Build allow_origin_regex for Vercel preview deployments
vercel_regex = r"https://.*\.vercel\.app"
localhost_regex = r"http://localhost:\d+"

app.add_middleware(
CORSMiddleware,
allow_origins=["http://localhost:3000", "http://localhost:5173"],
allow_origins=get_allowed_origins(),
allow_origin_regex=f"({vercel_regex}|{localhost_regex})",
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
Expand Down
60 changes: 60 additions & 0 deletions backend/src/routers/analysis.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,13 @@
from typing import Any, Dict, List
from uuid import UUID

from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status
from sqlalchemy.orm import Session

from src.core.dependencies.auth import get_current_user
from src.core.dependencies.get_db import get_db
from src.models.code_review import CodeReviewEntity
from src.models.finding import AgentFindingEntity
from src.repositories.code_review_repository import CodeReviewRepository
from src.schemas.analysis import AnalysisResponse
from src.schemas.user import User
Expand Down Expand Up @@ -64,3 +69,58 @@ async def analyze_code(
total_findings=result.total_findings,
created_at=result.created_at,
)


@router.get(
    "/analyses/{analysis_id}/findings",
    response_model=List[Dict[str, Any]],
    status_code=status.HTTP_200_OK,
    summary="Obtener findings de un análisis",
)
async def get_analysis_findings(
    analysis_id: UUID,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> List[Dict[str, Any]]:
    """
    Return every finding recorded for a single analysis.

    Args:
        analysis_id: UUID of the analysis to look up.
        current_user: Authenticated user (acts as an auth guard only).
        db: Database session.

    Returns:
        Serialized findings ordered by line number.

    Raises:
        HTTPException: 404 when no analysis exists with the given id.
    """

    def _serialize(finding: AgentFindingEntity) -> Dict[str, Any]:
        # Flatten the ORM entity into the plain-dict response shape.
        created = finding.created_at
        return {
            "id": str(finding.id),
            "agent_type": finding.agent_type,
            "severity": finding.severity.value,
            "issue_type": finding.issue_type,
            "line_number": finding.line_number,
            "message": finding.message,
            "code_snippet": finding.code_snippet,
            "suggestion": finding.suggestion,
            "ai_explanation": finding.ai_explanation,
            "created_at": created.isoformat() if created else None,
        }

    # Guard clause: the analysis must exist before its findings are queried.
    review = (
        db.query(CodeReviewEntity)
        .filter(CodeReviewEntity.id == analysis_id)
        .first()
    )
    if review is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND, detail=f"Análisis {analysis_id} no encontrado"
        )

    rows = (
        db.query(AgentFindingEntity)
        .filter(AgentFindingEntity.review_id == analysis_id)
        .order_by(AgentFindingEntity.line_number)
        .all()
    )

    return [_serialize(row) for row in rows]
Loading