diff --git a/.github/workflows/README.md b/.github/workflows/README.md new file mode 100644 index 0000000..3eb680c --- /dev/null +++ b/.github/workflows/README.md @@ -0,0 +1,210 @@ +# GitHub Actions Workflows + +Автоматизация CI/CD для tsdev проекта. + +## Workflows + +### deploy.yml + +Основной workflow для build, test и deployment. + +**Triggers:** +- Push в `main` или `develop` +- Pull requests в `main` +- Manual trigger через GitHub UI + +**Jobs:** + +1. **lint-and-test** + - Устанавливает зависимости + - Собирает все packages + - Запускает linter + - Запускает тесты + - Загружает build artifacts + +2. **build-docker** + - Собирает Docker образ + - Пушит в GitHub Container Registry + - Поддерживает multi-platform (amd64, arm64) + - Кэширует layers для быстрой сборки + +3. **deploy-staging** + - Деплоит в staging environment (develop branch) + - Использует Kubernetes + - Проверяет успешность deployment + +4. **deploy-production** + - Деплоит в production (main branch) + - Требует manual approval + - Запускает smoke tests + - Отправляет уведомления в Slack + +5. **deploy-fly** / **deploy-railway** / **deploy-lambda** + - Альтернативные deployment targets + - Параллельно с Kubernetes deployment + +6. 
**security-scan** + - Сканирует Docker образ на уязвимости + - Загружает результаты в GitHub Security + +## Secrets + +Настройте следующие secrets в GitHub: + +### Kubernetes Deployment +``` +KUBE_CONFIG_STAGING # Kubeconfig для staging cluster +KUBE_CONFIG_PROD # Kubeconfig для production cluster +``` + +### Cloud Providers +``` +AWS_ACCESS_KEY_ID # AWS credentials для Lambda +AWS_SECRET_ACCESS_KEY +FLY_API_TOKEN # Fly.io API token +RAILWAY_TOKEN # Railway API token +``` + +### Notifications +``` +SLACK_WEBHOOK # Slack webhook для уведомлений +``` + +## Environments + +Настройте environments в GitHub: + +### staging +- URL: https://staging-api.yourdomain.com +- Branch: develop +- Auto-deployment: enabled + +### production +- URL: https://api.yourdomain.com +- Branch: main +- Protection rules: + - Required reviewers: 1 + - Wait timer: 5 minutes + +### fly-production / lambda-production +- Альтернативные production environments + +## Usage + +### Automatic Deployment + +```bash +# Deploy to staging +git checkout develop +git push origin develop + +# Deploy to production +git checkout main +git merge develop +git push origin main +``` + +### Manual Deployment + +1. Перейти в Actions tab +2. Выбрать "Build and Deploy" workflow +3. Нажать "Run workflow" +4. Выбрать branch +5. Нажать "Run workflow" + +### Rollback + +```bash +# Через kubectl +kubectl rollout undo deployment/tsdev-api -n tsdev + +# Или через GitHub +# Найти предыдущий успешный commit +# Trigger workflow с этим commit +``` + +## Monitoring + +### Deployment Status + +- GitHub Actions UI показывает статус всех jobs +- Notifications в Slack +- Можно настроить email notifications + +### Logs + +```bash +# GitHub Actions logs доступны в UI +# Или через gh CLI +gh run list +gh run view +gh run view --log +``` + +## Customization + +### Добавить новый deployment target + +1. Создать новый job в `deploy.yml` +2. Добавить secrets +3. 
Настроить environment (опционально) + +Пример: +```yaml +deploy-my-platform: + name: Deploy to My Platform + runs-on: ubuntu-latest + needs: lint-and-test + if: github.ref == 'refs/heads/main' + + steps: + - uses: actions/checkout@v4 + - name: Deploy + run: my-deploy-command + env: + API_TOKEN: ${{ secrets.MY_PLATFORM_TOKEN }} +``` + +### Изменить условия deployment + +```yaml +# Deploy только на main +if: github.ref == 'refs/heads/main' + +# Deploy на main и staging branches +if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/develop' + +# Deploy только на tags +if: startsWith(github.ref, 'refs/tags/') + +# Manual trigger только +if: github.event_name == 'workflow_dispatch' +``` + +## Troubleshooting + +### Job failed + +1. Проверить logs в GitHub Actions UI +2. Локально воспроизвести команды +3. Проверить secrets configuration + +### Deployment timeout + +- Увеличить `timeout-minutes` в job +- Проверить health checks +- Проверить resource limits + +### Image not found + +- Проверить что `build-docker` job успешно завершился +- Проверить image tags в Container Registry +- Проверить permissions для GITHUB_TOKEN + +## Best Practices + +1. **Всегда тестировать в staging перед production** +2. **Использовать protection rules для production** +3. **Мониторить deployment metrics** +4. **Иметь rollback план** +5. 
**Документировать изменения в deployment process** diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..a5cc72a --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,301 @@ +name: Build and Deploy + +on: + push: + branches: [main, develop] + pull_request: + branches: [main] + workflow_dispatch: + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + lint-and-test: + name: Lint and Test + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Build packages + run: pnpm -r build + + - name: Run linter + run: pnpm lint || true # Continue on linter errors for now + + # - name: Run tests + # run: pnpm test + + - name: Upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: build-artifacts + path: | + packages/*/dist + examples/*/dist + retention-days: 1 + + build-docker: + name: Build Docker Image + runs-on: ubuntu-latest + needs: lint-and-test + if: github.event_name == 'push' + permissions: + contents: read + packages: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=pr + type=sha,prefix={{branch}}- + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=raw,value=latest,enable={{is_default_branch}} + + - 
name: Build and push Docker image + id: build + uses: docker/build-push-action@v5 + with: + context: . + file: examples/basic/Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64,linux/arm64 + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@v1 + with: + subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + subject-digest: ${{ steps.build.outputs.digest }} + push-to-registry: true + + deploy-staging: + name: Deploy to Staging + runs-on: ubuntu-latest + needs: build-docker + if: github.ref == 'refs/heads/develop' + environment: + name: staging + url: https://staging-api.yourdomain.com + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + # Deploy to Kubernetes staging + - name: Configure kubectl + uses: azure/k8s-set-context@v3 + with: + method: kubeconfig + kubeconfig: ${{ secrets.KUBE_CONFIG_STAGING }} + + - name: Deploy to Kubernetes + run: | + kubectl set image deployment/tsdev-api \ + api=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:develop-${{ github.sha }} \ + -n tsdev-staging + + kubectl rollout status deployment/tsdev-api -n tsdev-staging + + - name: Verify deployment + run: | + kubectl get pods -n tsdev-staging -l app=tsdev-api + kubectl get svc -n tsdev-staging tsdev-api + + deploy-production: + name: Deploy to Production + runs-on: ubuntu-latest + needs: build-docker + if: github.ref == 'refs/heads/main' + environment: + name: production + url: https://api.yourdomain.com + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + # Deploy to Kubernetes production + - name: Configure kubectl + uses: azure/k8s-set-context@v3 + with: + method: kubeconfig + kubeconfig: ${{ secrets.KUBE_CONFIG_PROD }} + + - name: Deploy to Kubernetes + run: | + kubectl set image deployment/tsdev-api \ + api=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:main-${{ github.sha }} \ + -n tsdev + + kubectl rollout 
status deployment/tsdev-api -n tsdev --timeout=5m + + - name: Verify deployment + run: | + kubectl get pods -n tsdev -l app=tsdev-api + kubectl get hpa -n tsdev tsdev-api-hpa + + - name: Run smoke tests + run: | + ENDPOINT=$(kubectl get ingress tsdev-api -n tsdev -o jsonpath='{.spec.rules[0].host}') + + # Health check + curl -f https://$ENDPOINT/health || exit 1 + + # Procedures endpoint + curl -f https://$ENDPOINT/procedures || exit 1 + + echo "✅ Smoke tests passed" + + - name: Notify deployment + if: always() + uses: 8398a7/action-slack@v3 + with: + status: ${{ job.status }} + text: | + Production deployment ${{ job.status }} + Commit: ${{ github.sha }} + Author: ${{ github.actor }} + webhook_url: ${{ secrets.SLACK_WEBHOOK }} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} + + deploy-fly: + name: Deploy to Fly.io + runs-on: ubuntu-latest + needs: lint-and-test + if: github.ref == 'refs/heads/main' + environment: + name: fly-production + url: https://tsdev-api.fly.dev + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Flyctl + uses: superfly/flyctl-actions/setup-flyctl@master + + - name: Deploy to Fly.io + run: flyctl deploy --remote-only + env: + FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} + + deploy-railway: + name: Deploy to Railway + runs-on: ubuntu-latest + needs: lint-and-test + if: github.ref == 'refs/heads/main' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Railway CLI + run: npm install -g @railway/cli + + - name: Deploy to Railway + run: railway up + env: + RAILWAY_TOKEN: ${{ secrets.RAILWAY_TOKEN }} + + deploy-lambda: + name: Deploy to AWS Lambda + runs-on: ubuntu-latest + needs: lint-and-test + if: github.ref == 'refs/heads/main' + environment: + name: lambda-production + url: https://api.execute-api.us-east-1.amazonaws.com/production + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10 + + - 
name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Build packages + run: pnpm -r build + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Deploy with Serverless + run: | + npm install -g serverless + cd examples/basic + serverless deploy --stage production + + # Security scanning + security-scan: + name: Security Scan + runs-on: ubuntu-latest + needs: build-docker + if: github.event_name == 'push' + + steps: + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@master + with: + image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }}-${{ github.sha }} + format: 'sarif' + output: 'trivy-results.sarif' + + - name: Upload Trivy results to GitHub Security + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: 'trivy-results.sarif' diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 0000000..d327bfb --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,1073 @@ +# tsdev Deployment Guide + +Стратегии развертывания приложений на базе tsdev фреймворка. + +--- + +## Обзор + +tsdev приложения могут развертываться в разных конфигурациях: + +1. **HTTP API Server** - RPC/REST endpoints для процедур +2. **Workflow Server** - Выполнение workflow с OpenTelemetry +3. **Next.js UI** - Визуализация и управление workflow +4. **CLI Tools** - Command-line интерфейс для процедур +5. 
**Serverless Functions** - Lambda/Edge functions + +--- + +## Архитектура развертывания + +### Монолитное развертывание +``` +┌─────────────────────────────────────┐ +│ Single Container/Server │ +│ ┌──────────────────────────────┐ │ +│ │ HTTP Server (port 3000) │ │ +│ │ - Procedures (/rpc/*) │ │ +│ │ - Workflows (/workflow/*) │ │ +│ │ - Introspection (/procedures)│ │ +│ └──────────────────────────────┘ │ +│ ┌──────────────────────────────┐ │ +│ │ Registry (auto-discovered) │ │ +│ │ - handlers/*.ts │ │ +│ └──────────────────────────────┘ │ +└─────────────────────────────────────┘ +``` + +**Плюсы:** +- Простота развертывания +- Низкая latency между компонентами +- Проще debugging + +**Минусы:** +- Все в одном процессе +- Сложнее масштабировать отдельные части + +### Микросервисная архитектура +``` +┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ +│ API Gateway │────▶│ Procedure │ │ Workflow │ +│ (REST/RPC) │ │ Service │────▶│ Orchestrator │ +│ Port 3000 │ │ Port 3001 │ │ Port 3002 │ +└──────────────────┘ └──────────────────┘ └──────────────────┘ + │ │ │ + └────────────────────────┴─────────────────────────┘ + │ + ┌───────▼────────┐ + │ Shared DB/ │ + │ Message Queue │ + └────────────────┘ +``` + +**Плюсы:** +- Независимое масштабирование +- Изоляция сервисов +- Технологическая гибкость + +**Минусы:** +- Сложнее настройка +- Network overhead +- Distributed tracing необходим + +--- + +## 1. 
Docker Deployment + +### Простой HTTP Server (Рекомендуется для начала) + +**Dockerfile:** +```dockerfile +FROM node:20-alpine AS base + +# Enable pnpm +ENV PNPM_HOME="/pnpm" +ENV PATH="$PNPM_HOME:$PATH" +RUN corepack enable + +WORKDIR /app + +# Copy workspace files +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ +COPY packages ./packages + +# Install dependencies +FROM base AS prod-deps +RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --prod --frozen-lockfile + +# Build packages +FROM base AS build +RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile +RUN pnpm --filter @tsdev/core build +RUN pnpm --filter @tsdev/workflow build +RUN pnpm --filter @tsdev/policies build +RUN pnpm --filter @tsdev/generators build +RUN pnpm --filter @tsdev/adapters build + +# Application stage +FROM base AS app-build +COPY --from=prod-deps /app/node_modules /app/node_modules +COPY --from=build /app/packages /app/packages + +# Copy your application code +COPY src ./src +COPY tsconfig.json ./ + +# Build your application +RUN pnpm build + +# Production image +FROM node:20-alpine AS production + +WORKDIR /app + +# Copy built packages +COPY --from=app-build /app/packages /app/packages +COPY --from=app-build /app/dist /app/dist +COPY --from=app-build /app/node_modules /app/node_modules +COPY --from=app-build /app/package.json /app/package.json + +# Environment +ENV NODE_ENV=production +ENV PORT=3000 + +EXPOSE 3000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD node -e "require('http').get('http://localhost:3000/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})" + +CMD ["node", "dist/server.js"] +``` + +**.dockerignore:** +``` +node_modules +dist +.git +.github +*.md +*.log +.env* +!.env.example +coverage +.next +``` + +**docker-compose.yml:** +```yaml +version: '3.8' + +services: + api: + build: + context: . 
+ dockerfile: Dockerfile + target: production + ports: + - "3000:3000" + environment: + - NODE_ENV=production + - PORT=3000 + - LOG_LEVEL=info + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + restart: unless-stopped + + # OpenTelemetry Collector (optional) + otel-collector: + image: otel/opentelemetry-collector:latest + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP HTTP receiver + - "8888:8888" # Prometheus metrics + restart: unless-stopped + + # Jaeger for trace visualization (optional) + jaeger: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" # Jaeger UI + - "14268:14268" # Jaeger collector + environment: + - COLLECTOR_OTLP_ENABLED=true + restart: unless-stopped +``` + +**Запуск:** +```bash +# Сборка +docker-compose build + +# Запуск +docker-compose up -d + +# Проверка +curl http://localhost:3000/procedures + +# Логи +docker-compose logs -f api + +# Остановка +docker-compose down +``` + +--- + +## 2. 
Kubernetes Deployment + +### Базовая конфигурация + +**deployment.yaml:** +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tsdev-api + labels: + app: tsdev-api +spec: + replicas: 3 + selector: + matchLabels: + app: tsdev-api + template: + metadata: + labels: + app: tsdev-api + spec: + containers: + - name: api + image: your-registry/tsdev-api:latest + ports: + - containerPort: 3000 + name: http + env: + - name: NODE_ENV + value: "production" + - name: PORT + value: "3000" + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://otel-collector:4318" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 500m + memory: 512Mi + livenessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /ready + port: 3000 + initialDelaySeconds: 5 + periodSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: tsdev-api +spec: + selector: + app: tsdev-api + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tsdev-api + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + nginx.ingress.kubernetes.io/rate-limit: "100" +spec: + ingressClassName: nginx + tls: + - hosts: + - api.yourdomain.com + secretName: tsdev-api-tls + rules: + - host: api.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: tsdev-api + port: + number: 80 +``` + +**Horizontal Pod Autoscaling:** +```yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: tsdev-api-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: tsdev-api + minReplicas: 2 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 70 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 +``` + +**Применение:** 
+```bash +kubectl apply -f deployment.yaml +kubectl apply -f hpa.yaml + +# Проверка +kubectl get pods -l app=tsdev-api +kubectl get svc tsdev-api +kubectl logs -l app=tsdev-api -f +``` + +--- + +## 3. Serverless Deployment + +### AWS Lambda + +**lambda-handler.ts:** +```typescript +import { collectRegistry, executeProcedure, createExecutionContext, type Registry } from '@tsdev/core'; +import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda'; + +let registry: Registry | null = null; + +export async function handler( + event: APIGatewayProxyEvent +): Promise<APIGatewayProxyResult> { + try { + // Lazy init registry (cold start optimization) + if (!registry) { + registry = await collectRegistry('./handlers'); + } + + const path = event.path; + const method = event.httpMethod; + + // Introspection + if (path === '/procedures' && method === 'GET') { + const procedures = Array.from(registry.entries()).map(([name, proc]) => ({ + name, + description: proc.contract.description, + input: proc.contract.input, + output: proc.contract.output, + })); + + return { + statusCode: 200, + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ procedures }), + }; + } + + // RPC execution + if (path.startsWith('/rpc/') && method === 'POST') { + const procedureName = path.slice(5); + const procedure = registry.get(procedureName); + + if (!procedure) { + return { + statusCode: 404, + body: JSON.stringify({ error: 'Procedure not found' }), + }; + } + + const input = JSON.parse(event.body || '{}'); + const context = createExecutionContext({ + transport: 'lambda', + requestId: event.requestContext.requestId, + sourceIp: event.requestContext.identity.sourceIp, + }); + + const result = await executeProcedure(procedure, input, context); + + return { + statusCode: 200, + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(result), + }; + } + + return { + statusCode: 404, + body: JSON.stringify({ error: 'Not found' }), + }; + } catch (error) { + console.error('Lambda 
error:', error); + return { + statusCode: 500, + body: JSON.stringify({ + error: error instanceof Error ? error.message : 'Internal error' + }), + }; + } +} +``` + +**serverless.yml (Serverless Framework):** +```yaml +service: tsdev-api + +provider: + name: aws + runtime: nodejs20.x + region: us-east-1 + memorySize: 512 + timeout: 30 + environment: + NODE_ENV: production + iam: + role: + statements: + - Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + Resource: "*" + +functions: + api: + handler: dist/lambda-handler.handler + events: + - httpApi: + path: /{proxy+} + method: ANY + +package: + individually: true + patterns: + - dist/** + - handlers/** + - node_modules/** + - '!node_modules/.cache/**' + +plugins: + - serverless-esbuild + +custom: + esbuild: + bundle: true + minify: true + target: node20 + platform: node + external: + - '@aws-sdk/*' +``` + +**Развертывание:** +```bash +# Установка Serverless Framework +npm install -g serverless + +# Сборка +pnpm build + +# Развертывание +serverless deploy --stage production + +# Тестирование +serverless invoke --function api --path test-event.json + +# Логи +serverless logs --function api --tail +``` + +### Vercel (для Next.js + API) + +**vercel.json:** +```json +{ + "buildCommand": "pnpm build", + "devCommand": "pnpm dev", + "installCommand": "pnpm install", + "framework": "nextjs", + "outputDirectory": ".next", + "regions": ["iad1"], + "env": { + "NODE_ENV": "production" + }, + "functions": { + "api/**/*.ts": { + "memory": 1024, + "maxDuration": 10 + } + }, + "rewrites": [ + { + "source": "/api/:path*", + "destination": "/api/:path*" + } + ] +} +``` + +--- + +## 4. 
Platform-as-a-Service (PaaS) + +### Railway + +**railway.toml:** +```toml +[build] +builder = "NIXPACKS" +buildCommand = "pnpm install && pnpm build" + +[deploy] +startCommand = "node dist/server.js" +healthcheckPath = "/health" +healthcheckTimeout = 100 +restartPolicyType = "ON_FAILURE" +restartPolicyMaxRetries = 10 + +[[services]] +name = "api" + +[services.env] +NODE_ENV = "production" +PORT = "3000" +``` + +### Render + +**render.yaml:** +```yaml +services: + - type: web + name: tsdev-api + env: node + region: oregon + plan: starter + buildCommand: pnpm install && pnpm build + startCommand: node dist/server.js + healthCheckPath: /health + envVars: + - key: NODE_ENV + value: production + - key: PORT + value: 3000 + autoDeploy: true +``` + +### Fly.io + +**fly.toml:** +```toml +app = "tsdev-api" +primary_region = "iad" + +[build] + builder = "paketobuildpacks/builder:base" + buildpacks = ["gcr.io/paketo-buildpacks/nodejs"] + +[env] + NODE_ENV = "production" + PORT = "8080" + +[http_service] + internal_port = 8080 + force_https = true + auto_stop_machines = true + auto_start_machines = true + min_machines_running = 0 + processes = ["app"] + +[[services]] + http_checks = [] + internal_port = 8080 + processes = ["app"] + protocol = "tcp" + script_checks = [] + + [services.concurrency] + hard_limit = 25 + soft_limit = 20 + type = "connections" + + [[services.ports]] + force_https = true + handlers = ["http"] + port = 80 + + [[services.ports]] + handlers = ["tls", "http"] + port = 443 + + [[services.tcp_checks]] + grace_period = "1s" + interval = "15s" + restart_limit = 0 + timeout = "2s" + +[[vm]] + cpu_kind = "shared" + cpus = 1 + memory_mb = 256 +``` + +**Развертывание:** +```bash +# Railway +railway login +railway init +railway up + +# Render +render deploy + +# Fly.io +fly launch +fly deploy +``` + +--- + +## 5. 
CI/CD Pipeline + +### GitHub Actions + +**.github/workflows/deploy.yml:** +```yaml +name: Deploy + +on: + push: + branches: [main, production] + pull_request: + branches: [main] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'pnpm' + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Build packages + run: pnpm -r build + + - name: Run linter + run: pnpm lint + + - name: Run tests + run: pnpm test + + build-and-push: + needs: test + runs-on: ubuntu-latest + if: github.event_name == 'push' + permissions: + contents: read + packages: write + + steps: + - uses: actions/checkout@v4 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=sha,prefix={{branch}}- + type=semver,pattern={{version}} + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + + deploy-production: + needs: build-and-push + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' + environment: + name: production + url: https://api.yourdomain.com + + steps: + - uses: actions/checkout@v4 + + - name: Deploy to Kubernetes + uses: azure/k8s-deploy@v4 + with: + manifests: | + k8s/deployment.yaml + k8s/service.yaml + k8s/ingress.yaml + images: | + ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }} + kubectl-version: 'latest' +``` + +--- + +## 6. 
Environment Configuration + +### Управление переменными окружения + +**.env.example:** +```bash +# Application +NODE_ENV=production +PORT=3000 +LOG_LEVEL=info + +# OpenTelemetry +OTEL_SERVICE_NAME=tsdev-api +OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318 +OTEL_TRACES_SAMPLER=parentbased_traceidratio +OTEL_TRACES_SAMPLER_ARG=0.1 + +# Rate Limiting +RATE_LIMIT_WINDOW_MS=60000 +RATE_LIMIT_MAX_REQUESTS=100 + +# Database (если используется) +DATABASE_URL=postgresql://user:pass@localhost:5432/db +DATABASE_POOL_MIN=2 +DATABASE_POOL_MAX=10 + +# Redis (для distributed caching) +REDIS_URL=redis://localhost:6379 +REDIS_TTL=3600 + +# Monitoring +SENTRY_DSN=https://your-sentry-dsn +DATADOG_API_KEY=your-datadog-key +``` + +### Config loader + +**src/config.ts:** +```typescript +import { z } from 'zod'; + +const ConfigSchema = z.object({ + nodeEnv: z.enum(['development', 'production', 'test']).default('development'), + port: z.coerce.number().default(3000), + logLevel: z.enum(['debug', 'info', 'warn', 'error']).default('info'), + + otel: z.object({ + serviceName: z.string().default('tsdev-api'), + endpoint: z.string().optional(), + samplerRatio: z.coerce.number().min(0).max(1).default(1), + }), + + rateLimit: z.object({ + windowMs: z.coerce.number().default(60000), + maxRequests: z.coerce.number().default(100), + }), +}); + +export type Config = z.infer<typeof ConfigSchema>; + +export function loadConfig(): Config { + return ConfigSchema.parse({ + nodeEnv: process.env.NODE_ENV, + port: process.env.PORT, + logLevel: process.env.LOG_LEVEL, + + otel: { + serviceName: process.env.OTEL_SERVICE_NAME, + endpoint: process.env.OTEL_EXPORTER_OTLP_ENDPOINT, + samplerRatio: process.env.OTEL_TRACES_SAMPLER_ARG, + }, + + rateLimit: { + windowMs: process.env.RATE_LIMIT_WINDOW_MS, + maxRequests: process.env.RATE_LIMIT_MAX_REQUESTS, + }, + }); +} +``` + +--- + +## 7. 
Production Checklist + +### Перед развертыванием + +- [ ] **Build проходит без ошибок** + ```bash + pnpm -r build + ``` + +- [ ] **Linter проходит** + ```bash + pnpm lint + ``` + +- [ ] **Tests проходят** + ```bash + pnpm test + ``` + +- [ ] **Health check endpoint реализован** + ```typescript + // src/health.ts + export function createHealthCheck(registry: Registry) { + return async () => ({ + status: 'ok', + timestamp: new Date().toISOString(), + version: process.env.npm_package_version, + procedures: registry.size, + uptime: process.uptime(), + }); + } + ``` + +- [ ] **Graceful shutdown настроен** + ```typescript + // src/server.ts + let server: Server; + + async function shutdown() { + console.log('Shutting down gracefully...'); + + server.close(() => { + console.log('Server closed'); + process.exit(0); + }); + + // Force close after 10s + setTimeout(() => { + console.error('Forced shutdown'); + process.exit(1); + }, 10000); + } + + process.on('SIGTERM', shutdown); + process.on('SIGINT', shutdown); + ``` + +- [ ] **Logging настроен** + ```bash + npm install pino pino-pretty + ``` + + ```typescript + import pino from 'pino'; + + export const logger = pino({ + level: process.env.LOG_LEVEL || 'info', + transport: process.env.NODE_ENV === 'development' + ? 
{ target: 'pino-pretty' } + : undefined, + }); + ``` + +- [ ] **Error tracking (Sentry/Datadog)** + ```typescript + import * as Sentry from '@sentry/node'; + + if (process.env.SENTRY_DSN) { + Sentry.init({ + dsn: process.env.SENTRY_DSN, + environment: process.env.NODE_ENV, + tracesSampleRate: 0.1, + }); + } + ``` + +- [ ] **Rate limiting настроен** + ```typescript + import { withRateLimit } from '@tsdev/policies'; + + const handler = applyPolicies( + baseHandler, + withRateLimit({ maxTokens: 100, windowMs: 60000 }) + ); + ``` + +- [ ] **CORS настроен** + ```typescript + // В HTTP adapter + res.setHeader('Access-Control-Allow-Origin', process.env.CORS_ORIGIN || '*'); + ``` + +- [ ] **Security headers** + ```bash + npm install helmet + ``` + +- [ ] **OpenTelemetry сконфигурирован** + +--- + +## 8. Monitoring & Observability + +### Metrics (Prometheus) + +**src/metrics.ts:** +```typescript +import { register, Counter, Histogram } from 'prom-client'; + +export const procedureCallsTotal = new Counter({ + name: 'tsdev_procedure_calls_total', + help: 'Total procedure calls', + labelNames: ['procedure', 'status'], +}); + +export const procedureDuration = new Histogram({ + name: 'tsdev_procedure_duration_seconds', + help: 'Procedure execution duration', + labelNames: ['procedure'], + buckets: [0.01, 0.05, 0.1, 0.5, 1, 2, 5], +}); + +// Expose metrics endpoint +export function getMetrics() { + return register.metrics(); +} +``` + +### Health Check + +**src/health.ts:** +```typescript +export async function healthCheck(registry: Registry) { + return { + status: 'healthy', + timestamp: new Date().toISOString(), + version: process.env.npm_package_version, + uptime: process.uptime(), + memory: process.memoryUsage(), + procedures: { + total: registry.size, + names: Array.from(registry.keys()), + }, + }; +} +``` + +--- + +## 9. 
Масштабирование + +### Horizontal Scaling + +**Stateless design:** +- Все процедуры должны быть stateless +- Используйте Redis для shared state +- Session в JWT или external store + +**Load Balancing:** +```nginx +upstream tsdev_backend { + least_conn; + server api1:3000; + server api2:3000; + server api3:3000; +} + +server { + listen 80; + + location / { + proxy_pass http://tsdev_backend; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } +} +``` + +### Caching Strategy + +```typescript +import { withCache } from '@tsdev/policies'; + +const handler = applyPolicies( + baseHandler, + withCache({ + ttl: 300, // 5 minutes + keyGenerator: (input) => JSON.stringify(input), + }) +); +``` + +--- + +## Рекомендации по выбору платформы + +| Сценарий | Платформа | Причина | +|----------|-----------|---------| +| **MVP/Prototype** | Railway, Render | Быстрый deploy, бесплатный tier | +| **Production API** | Kubernetes, Docker | Полный контроль, масштабируемость | +| **Serverless** | AWS Lambda, Vercel | Низкая стоимость при спорадической нагрузке | +| **Next.js + API** | Vercel | Оптимизирован для Next.js | +| **Enterprise** | Kubernetes + AWS/GCP | Compliance, control, scaling | + +--- + +## Дополнительные ресурсы + +- [Docker Best Practices](https://docs.docker.com/develop/dev-best-practices/) +- [Kubernetes Patterns](https://kubernetes.io/docs/concepts/) +- [12 Factor App](https://12factor.net/) +- [OpenTelemetry Deployment](https://opentelemetry.io/docs/collector/deployment/) diff --git a/QUICK_DEPLOY.md b/QUICK_DEPLOY.md new file mode 100644 index 0000000..89e9032 --- /dev/null +++ b/QUICK_DEPLOY.md @@ -0,0 +1,316 @@ +# Quick Deploy Guide + +Быстрая памятка по развертыванию tsdev приложений. + +## 🚀 Самый быстрый способ + +### Option 1: Docker (локально или VPS) + +```bash +cd examples/basic +docker-compose up -d +``` + +✅ Готово! 
API доступен на http://localhost:3000 + +--- + +### Option 2: Railway (бесплатно, <1 минута) + +```bash +# Установить CLI +npm install -g @railway/cli + +# Войти и развернуть +railway login +railway init +railway up +``` + +✅ Получите публичный URL автоматически + +--- + +### Option 3: Fly.io (почти бесплатно, <2 минуты) + +```bash +# Установить flyctl +curl -L https://fly.io/install.sh | sh + +# Войти и развернуть +fly auth login +fly launch # Автоматически найдет fly.toml +fly deploy +``` + +✅ URL: https://tsdev-api.fly.dev + +--- + +## 📋 Checklist перед деплоем + +### Минимальный (для тестирования) + +```bash +# 1. Проверить что код собирается +pnpm install +pnpm -r build + +# 2. Проверить что сервер запускается +cd examples/basic +pnpm dev + +# 3. Проверить health check +curl http://localhost:3000/health +``` + +✅ Если все ОК - можно деплоить + +### Production (для реальных приложений) + +```bash +# 1. Линтер +pnpm lint + +# 2. Тесты (когда добавите) +pnpm test + +# 3. Environment variables +cp .env.example .env.production +# Заполнить значения + +# 4. Security +# - Secrets не в git +# - CORS настроен +# - Rate limiting включен + +# 5. 
Monitoring +# - Logging настроен +# - Health checks работают +# - Alerts настроены +``` + +--- + +## 🎯 По платформам + +### Docker + +```bash +cd examples/basic + +# Запустить +docker-compose up -d + +# Проверить +curl http://localhost:3000/procedures + +# Остановить +docker-compose down +``` + +**Включает:** +- API сервер +- OpenTelemetry Collector +- Jaeger (traces) +- Prometheus (metrics) + +--- + +### Kubernetes + +```bash +# Создать namespace +kubectl create namespace tsdev + +# Развернуть +kubectl apply -f examples/basic/k8s/deployment.yaml +kubectl apply -f examples/basic/k8s/ingress.yaml + +# Проверить +kubectl get pods -n tsdev +kubectl port-forward -n tsdev svc/tsdev-api 3000:80 +``` + +--- + +### AWS Lambda + +```bash +cd examples/basic + +# Установить Serverless +npm install -g serverless + +# Развернуть +serverless deploy --stage production + +# Тестировать +serverless invoke --function api --path test-event.json +``` + +--- + +### Fly.io + +```bash +cd examples/basic + +# Одна команда +fly deploy + +# Масштабирование +fly scale count 3 +fly scale memory 512 +``` + +--- + +### Railway + +```bash +cd examples/basic + +# Через CLI +railway up + +# Или через Git +# 1. Push в GitHub +# 2. Connect на railway.app +# 3. Auto-deploy +``` + +--- + +### Render + +```bash +# 1. Push код в GitHub +# 2. Создать Web Service на render.com +# 3. Выбрать репозиторий +# 4. Render автоматически найдет render.yaml +# 5. 
Deploy +``` + +--- + +## 🔧 Endpoints после деплоя + +Проверьте что эти endpoints работают: + +```bash +# Health check +curl https://your-domain.com/health + +# Procedures list +curl https://your-domain.com/procedures + +# OpenAPI spec +curl https://your-domain.com/openapi.json + +# Swagger UI +open https://your-domain.com/docs +``` + +--- + +## 📊 Мониторинг + +### Базовый + +```bash +# Logs (Docker) +docker-compose logs -f api + +# Logs (Kubernetes) +kubectl logs -f -n tsdev -l app=tsdev-api + +# Logs (Fly.io) +fly logs + +# Logs (Railway) +railway logs +``` + +### Advanced + +1. **Traces**: Jaeger UI на http://localhost:16686 (Docker Compose) +2. **Metrics**: Prometheus на http://localhost:9090 +3. **Status**: `curl https://your-domain.com/status` + +--- + +## ⚠️ Troubleshooting + +### Container не запускается + +```bash +# Проверить logs +docker logs + +# Проверить health +docker inspect | grep Health -A 10 +``` + +**Решение:** Проверить environment variables + +--- + +### 502/503 ошибки + +```bash +# Проверить что под запущен +kubectl get pods + +# Проверить readiness probe +kubectl describe pod +``` + +**Решение:** +- Проверить `/ready` endpoint +- Увеличить `initialDelaySeconds` + +--- + +### High latency + +```bash +# Проверить traces в Jaeger +open http://localhost:16686 + +# Найти медленные процедуры +``` + +**Решение:** +- Добавить caching с `withCache` policy +- Оптимизировать медленные handlers + +--- + +## 🎓 Дальше + +- **Полная документация**: [DEPLOYMENT.md](./DEPLOYMENT.md) +- **CI/CD setup**: [.github/workflows/README.md](./.github/workflows/README.md) +- **Kubernetes guide**: [examples/basic/k8s/README.md](./examples/basic/k8s/README.md) +- **Примеры**: [examples/basic/README.deploy.md](./examples/basic/README.deploy.md) + +--- + +## 💡 Pro Tips + +1. **Начинайте с Railway/Fly.io** для прототипов +2. **Переходите на Kubernetes** когда нужен контроль +3. **Используйте Lambda** для sporadic workloads +4. 
**Docker Compose** идеален для staging/dev environments + +5. **Всегда настраивайте:** + - Health checks + - Logging + - Monitoring + - Graceful shutdown + +6. **Production must-have:** + - HTTPS + - Rate limiting + - Error tracking + - Backups diff --git a/README.md b/README.md index 22533ca..2968765 100644 --- a/README.md +++ b/README.md @@ -735,6 +735,8 @@ pnpm dev # http://localhost:3000 - [PHILOSOPHY.md](./PHILOSOPHY.md) - Why AI agents need this - [ARCHITECTURE.md](./ARCHITECTURE.md) - Technical implementation +- [DEPLOYMENT.md](./DEPLOYMENT.md) - How to deploy applications +- [QUICK_DEPLOY.md](./QUICK_DEPLOY.md) - Quick deployment guide --- diff --git a/examples/basic/.dockerignore b/examples/basic/.dockerignore new file mode 100644 index 0000000..e917279 --- /dev/null +++ b/examples/basic/.dockerignore @@ -0,0 +1,15 @@ +node_modules +dist +.next +*.log +.env* +!.env.example +coverage +.git +.github +*.md +README.md +.DS_Store +Thumbs.db +.vscode +.idea diff --git a/examples/basic/.env.example b/examples/basic/.env.example new file mode 100644 index 0000000..34bf203 --- /dev/null +++ b/examples/basic/.env.example @@ -0,0 +1,45 @@ +# Application Configuration +NODE_ENV=development +PORT=3000 +LOG_LEVEL=info + +# OpenTelemetry Configuration +OTEL_SERVICE_NAME=tsdev-api +OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 +OTEL_TRACES_SAMPLER=parentbased_traceidratio +OTEL_TRACES_SAMPLER_ARG=1.0 + +# Rate Limiting +RATE_LIMIT_WINDOW_MS=60000 +RATE_LIMIT_MAX_REQUESTS=100 + +# CORS Configuration +CORS_ORIGIN=* +CORS_METHODS=GET,POST,OPTIONS +CORS_HEADERS=Content-Type,Authorization + +# Database (если используется) +# DATABASE_URL=postgresql://user:password@localhost:5432/tsdev +# DATABASE_POOL_MIN=2 +# DATABASE_POOL_MAX=10 + +# Redis (для caching/sessions) +# REDIS_URL=redis://localhost:6379 +# REDIS_TTL=3600 + +# Error Tracking +# SENTRY_DSN=https://your-sentry-dsn +# SENTRY_ENVIRONMENT=development + +# Monitoring +# DATADOG_API_KEY=your-datadog-key +# 
NEW_RELIC_LICENSE_KEY=your-newrelic-key + +# Security +# JWT_SECRET=your-jwt-secret-key-change-in-production +# API_KEY=your-api-key + +# Feature Flags +# ENABLE_SWAGGER_UI=true +# ENABLE_METRICS=true +# ENABLE_TRACING=true diff --git a/examples/basic/Dockerfile b/examples/basic/Dockerfile new file mode 100644 index 0000000..1dece97 --- /dev/null +++ b/examples/basic/Dockerfile @@ -0,0 +1,78 @@ +# Multi-stage build for tsdev basic example + +# Stage 1: Base image with pnpm +FROM node:20-alpine AS base +ENV PNPM_HOME="/pnpm" +ENV PATH="$PNPM_HOME:$PATH" +RUN corepack enable +WORKDIR /app + +# Stage 2: Install all dependencies (including dev) +FROM base AS deps +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ +COPY packages/core/package.json ./packages/core/ +COPY packages/adapters/package.json ./packages/adapters/ +COPY examples/basic/package.json ./examples/basic/ +RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --frozen-lockfile + +# Stage 3: Build packages +FROM base AS build +COPY --from=deps /app/node_modules ./node_modules +COPY --from=deps /app/packages ./packages +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ + +# Copy source files for packages +COPY packages/core ./packages/core +COPY packages/adapters ./packages/adapters + +# Build core packages +RUN pnpm --filter @tsdev/core build +RUN pnpm --filter @tsdev/adapters build + +# Copy application source +COPY examples/basic ./examples/basic + +# Stage 4: Production dependencies only +FROM base AS prod-deps +COPY pnpm-workspace.yaml package.json pnpm-lock.yaml ./ +COPY packages/core/package.json ./packages/core/ +COPY packages/adapters/package.json ./packages/adapters/ +COPY examples/basic/package.json ./examples/basic/ +RUN --mount=type=cache,id=pnpm,target=/pnpm/store pnpm install --prod --frozen-lockfile + +# Stage 5: Production image +FROM node:20-alpine AS production + +WORKDIR /app + +# Copy built packages +COPY --from=build /app/packages/core/dist ./packages/core/dist 
+COPY --from=build /app/packages/core/package.json ./packages/core/ +COPY --from=build /app/packages/adapters/dist ./packages/adapters/dist +COPY --from=build /app/packages/adapters/package.json ./packages/adapters/ + +# Copy application +COPY --from=build /app/examples/basic/src ./src +COPY --from=build /app/examples/basic/package.json ./ + +# Copy production dependencies +COPY --from=prod-deps /app/node_modules ./node_modules + +# Set environment +ENV NODE_ENV=production +ENV PORT=3000 + +# Expose port +EXPOSE 3000 + +# Add non-root user for security +RUN addgroup -g 1001 -S nodejs && \ + adduser -S nodejs -u 1001 +USER nodejs + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD node -e "require('http').get('http://localhost:3000/procedures', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})" + +# Start application +CMD ["node", "--import", "tsx/esm", "src/apps/http.ts"] diff --git a/examples/basic/README.deploy.md b/examples/basic/README.deploy.md new file mode 100644 index 0000000..b1b5027 --- /dev/null +++ b/examples/basic/README.deploy.md @@ -0,0 +1,449 @@ +# Deployment Examples + +Практические примеры развертывания tsdev API на разных платформах. + +## Содержание + +- [Docker](#docker) +- [Docker Compose](#docker-compose) +- [Kubernetes](#kubernetes) +- [AWS Lambda (Serverless)](#aws-lambda) +- [Fly.io](#flyio) +- [Railway](#railway) +- [Render](#render) + +--- + +## Docker + +### Быстрый старт + +```bash +# Собрать образ +docker build -t tsdev-api:latest -f Dockerfile ../.. 
+ +# Запустить контейнер +docker run -p 3000:3000 \ + -e NODE_ENV=production \ + -e LOG_LEVEL=info \ + --name tsdev-api \ + tsdev-api:latest + +# Проверить +curl http://localhost:3000/health +curl http://localhost:3000/procedures +``` + +### Production с docker-compose + +```bash +# Запустить все сервисы (API + OpenTelemetry + Jaeger) +docker-compose up -d + +# Проверить логи +docker-compose logs -f api + +# Масштабировать +docker-compose up -d --scale api=3 + +# Остановить +docker-compose down +``` + +**Доступные сервисы:** +- API: http://localhost:3000 +- Jaeger UI: http://localhost:16686 +- Prometheus: http://localhost:9090 +- OTEL Collector Health: http://localhost:13133 + +--- + +## Kubernetes + +### Развертывание + +```bash +# Создать namespace +kubectl create namespace tsdev + +# Применить конфигурацию +kubectl apply -f k8s/deployment.yaml +kubectl apply -f k8s/ingress.yaml + +# Проверить +kubectl get pods -n tsdev -l app=tsdev-api +kubectl get svc -n tsdev tsdev-api +kubectl get hpa -n tsdev tsdev-api-hpa + +# Логи +kubectl logs -n tsdev -l app=tsdev-api -f +``` + +### Port forwarding для тестирования + +```bash +kubectl port-forward -n tsdev svc/tsdev-api 3000:80 + +# Тестировать +curl http://localhost:3000/procedures +``` + +### Обновление + +```bash +# Rolling update +kubectl set image deployment/tsdev-api \ + api=ghcr.io/your-org/tsdev-api:v2.0.0 \ + -n tsdev + +# Проверить статус +kubectl rollout status deployment/tsdev-api -n tsdev + +# Откат +kubectl rollout undo deployment/tsdev-api -n tsdev +``` + +--- + +## AWS Lambda + +### Установка + +```bash +# Установить Serverless Framework +npm install -g serverless + +# Установить плагины +pnpm install --save-dev \ + serverless-esbuild \ + serverless-offline \ + @types/aws-lambda +``` + +### Локальное тестирование + +```bash +# Запустить локально +serverless offline + +# API доступен на http://localhost:3000 +curl http://localhost:3000/dev/procedures +curl -X POST http://localhost:3000/dev/rpc/users.create 
\ + -H "Content-Type: application/json" \ + -d '{"name":"Alice","email":"alice@example.com"}' +``` + +### Развертывание + +```bash +# Развернуть в dev +serverless deploy --stage dev + +# Развернуть в production +serverless deploy --stage production + +# Посмотреть информацию +serverless info --stage production + +# Вызвать функцию +serverless invoke --function api \ + --path test-event.json \ + --stage production + +# Логи +serverless logs --function api --tail --stage production +``` + +### Удаление + +```bash +serverless remove --stage dev +``` + +**test-event.json:** +```json +{ + "path": "/procedures", + "httpMethod": "GET", + "headers": { + "Content-Type": "application/json" + }, + "requestContext": { + "requestId": "test-123", + "identity": { + "sourceIp": "127.0.0.1" + } + } +} +``` + +--- + +## Fly.io + +### Установка flyctl + +```bash +# macOS +brew install flyctl + +# Linux +curl -L https://fly.io/install.sh | sh + +# Windows +powershell -Command "iwr https://fly.io/install.ps1 -useb | iex" +``` + +### Первое развертывание + +```bash +# Войти +fly auth login + +# Создать приложение +fly launch +# Выберите регион, конфигурация уже в fly.toml + +# Развернуть +fly deploy + +# Открыть в браузере +fly open + +# Посмотреть статус +fly status + +# Логи +fly logs +``` + +### Масштабирование + +```bash +# Масштабировать машины +fly scale count 3 + +# Изменить VM размер +fly scale vm shared-cpu-1x --memory 512 + +# Проверить +fly scale show +``` + +### Secrets + +```bash +# Добавить секреты +fly secrets set DATABASE_URL=postgresql://... 
+fly secrets set API_KEY=secret123 + +# Посмотреть +fly secrets list +``` + +### Регионы + +```bash +# Добавить регион +fly regions add lhr # London + +# Посмотреть регионы +fly regions list +``` + +--- + +## Railway + +### Развертывание через CLI + +```bash +# Установить Railway CLI +npm install -g @railway/cli + +# Войти +railway login + +# Инициализировать +railway init + +# Связать с проектом +railway link + +# Развернуть +railway up + +# Посмотреть логи +railway logs +``` + +### Развертывание через Git + +1. Создать проект на railway.app +2. Подключить GitHub репозиторий +3. Railway автоматически деплоит при push +4. Конфигурация берется из `railway.toml` + +### Environment Variables + +```bash +# Добавить через CLI +railway variables set NODE_ENV=production +railway variables set LOG_LEVEL=info + +# Или через dashboard на railway.app +``` + +--- + +## Render + +### Развертывание через dashboard + +1. Создать аккаунт на render.com +2. New > Web Service +3. Подключить GitHub репозиторий +4. Render автоматически находит `render.yaml` +5. 
Deploy + +### Развертывание через CLI (опционально) + +```bash +# Установить render-cli (если нужно) +npm install -g render-cli + +# Войти +render login + +# Развернуть +render deploy +``` + +### Environment Variables + +Добавить в dashboard или в `render.yaml`: + +```yaml +envVars: + - key: DATABASE_URL + sync: false # Не коммитить в git + - key: API_KEY + generateValue: true # Сгенерировать случайное значение +``` + +--- + +## Сравнение платформ + +| Критерий | Docker | K8s | Lambda | Fly.io | Railway | Render | +|----------|--------|-----|--------|--------|---------|--------| +| **Сложность** | 🟢 Low | 🔴 High | 🟡 Medium | 🟢 Low | 🟢 Low | 🟢 Low | +| **Контроль** | 🟡 Medium | 🟢 Full | 🔴 Limited | 🟡 Medium | 🔴 Limited | 🔴 Limited | +| **Стоимость** | 💰 Self-hosted | 💰💰💰 High | 💰 Pay-per-use | 💰💰 Medium | 💰 Free tier | 💰 Free tier | +| **Масштабирование** | Manual | Auto | Auto | Auto | Auto | Auto | +| **Cold starts** | ❌ No | ❌ No | ✅ Yes | 🟡 Optional | ✅ Yes | ❌ No | +| **Best for** | Dev/Self-hosted | Enterprise | Sporadic load | Apps | Prototypes | Web apps | + +--- + +## Production Checklist + +Перед production деплоем: + +### Код + +- [ ] Build успешно: `pnpm build` +- [ ] Linter проходит: `pnpm lint` +- [ ] Тесты зеленые: `pnpm test` +- [ ] Нет TODO/FIXME в критичных местах + +### Конфигурация + +- [ ] Environment variables настроены +- [ ] Secrets для чувствительных данных +- [ ] Health checks работают (`/health`, `/ready`) +- [ ] Graceful shutdown реализован +- [ ] CORS правильно настроен + +### Мониторинг + +- [ ] Logging настроен (structured logs) +- [ ] Error tracking (Sentry/Datadog) +- [ ] OpenTelemetry экспортирует traces +- [ ] Metrics endpoint доступен +- [ ] Alerts настроены + +### Безопасность + +- [ ] Rate limiting включен +- [ ] Input validation через Zod +- [ ] HTTPS включен +- [ ] Security headers настроены +- [ ] Secrets не в git + +### Performance + +- [ ] Caching strategy определена +- [ ] Database connection pooling +- [ ] Compression 
включен +- [ ] CDN для статики (если есть) + +### Backup & Recovery + +- [ ] Backup strategy для данных +- [ ] Disaster recovery план +- [ ] Rollback strategy протестирован + +--- + +## Troubleshooting + +### Проблема: Container не запускается + +```bash +# Проверить логи +docker logs + +# Запустить интерактивно +docker run -it tsdev-api:latest sh + +# Проверить health check +docker inspect --format='{{json .State.Health}}' +``` + +### Проблема: High memory usage + +```bash +# Node.js heap limit +docker run -e NODE_OPTIONS="--max-old-space-size=512" tsdev-api:latest + +# Kubernetes limits +# Установить в deployment.yaml: +resources: + limits: + memory: 512Mi +``` + +### Проблема: Slow cold starts (Lambda) + +- Используйте provisioned concurrency +- Минимизируйте bundle size +- Lazy load зависимостей +- Кэшируйте registry между вызовами + +### Проблема: 504 Gateway Timeout + +- Увеличьте timeout в ingress/load balancer +- Оптимизируйте медленные процедуры +- Используйте async processing для длинных задач + +--- + +## Дополнительные ресурсы + +- [Docker Best Practices](https://docs.docker.com/develop/dev-best-practices/) +- [Kubernetes Deployment Strategies](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Serverless Framework Docs](https://www.serverless.com/framework/docs) +- [Fly.io Docs](https://fly.io/docs/) +- [Railway Docs](https://docs.railway.app/) +- [Render Docs](https://render.com/docs) diff --git a/examples/basic/docker-compose.yml b/examples/basic/docker-compose.yml new file mode 100644 index 0000000..d28152f --- /dev/null +++ b/examples/basic/docker-compose.yml @@ -0,0 +1,77 @@ +version: '3.8' + +services: + api: + build: + context: ../.. 
+ dockerfile: examples/basic/Dockerfile + target: production + ports: + - "3000:3000" + environment: + - NODE_ENV=production + - PORT=3000 + - LOG_LEVEL=info + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3000/procedures"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 10s + restart: unless-stopped + networks: + - tsdev + + # Optional: OpenTelemetry Collector + otel-collector: + image: otel/opentelemetry-collector-contrib:latest + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "4317:4317" # OTLP gRPC receiver + - "4318:4318" # OTLP HTTP receiver + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter metrics + - "13133:13133" # health_check extension + networks: + - tsdev + restart: unless-stopped + + # Optional: Jaeger for trace visualization + jaeger: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" # Jaeger UI + - "14268:14268" # Jaeger collector HTTP + - "14250:14250" # Jaeger collector gRPC + environment: + - COLLECTOR_OTLP_ENABLED=true + - LOG_LEVEL=debug + networks: + - tsdev + restart: unless-stopped + + # Optional: Prometheus for metrics + prometheus: + image: prom/prometheus:latest + ports: + - "9090:9090" + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml + - prometheus-data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + - '--web.console.templates=/usr/share/prometheus/consoles' + networks: + - tsdev + restart: unless-stopped + +networks: + tsdev: + driver: bridge + +volumes: + prometheus-data: diff --git a/examples/basic/fly.toml b/examples/basic/fly.toml new file mode 100644 index 0000000..3fdca8a --- /dev/null +++ b/examples/basic/fly.toml @@ -0,0 +1,66 @@ +# Fly.io configuration for tsdev 
API +# Deploy with: fly launch / fly deploy + +app = "tsdev-api" +primary_region = "iad" # US East (Virginia) + +# Kill signal (SIGINT or SIGTERM) +kill_signal = "SIGINT" +kill_timeout = 10 + +# Processes +[processes] + app = "node --import tsx/esm src/apps/http.ts" + +# Build configuration +[build] + [build.args] + NODE_VERSION = "20" + +# Environment variables +[env] + NODE_ENV = "production" + PORT = "8080" + LOG_LEVEL = "info" + +# HTTP service +[http_service] + internal_port = 8080 + force_https = true + auto_stop_machines = "stop" # Automatically stop when idle + auto_start_machines = true # Automatically start when needed + min_machines_running = 0 # Can scale to 0 + processes = ["app"] + + # Concurrency limits + [http_service.concurrency] + type = "connections" + hard_limit = 25 + soft_limit = 20 + +# Health checks +[[http_service.checks]] + grace_period = "10s" + interval = "30s" + method = "GET" + timeout = "5s" + path = "/health" + +# VM resources +[[vm]] + cpu_kind = "shared" + cpus = 1 + memory_mb = 256 + +# Autoscaling +[metrics] + port = 9091 + path = "/metrics" + +# Regions to deploy (multi-region) +# [[regions]] +# name = "iad" # US East +# [[regions]] +# name = "lhr" # London +# [[regions]] +# name = "nrt" # Tokyo diff --git a/examples/basic/k8s/README.md b/examples/basic/k8s/README.md new file mode 100644 index 0000000..a921433 --- /dev/null +++ b/examples/basic/k8s/README.md @@ -0,0 +1,203 @@ +# Kubernetes Deployment Guide + +Инструкции по развертыванию tsdev API в Kubernetes. + +## Prerequisites + +- Kubernetes cluster (1.25+) +- kubectl configured +- NGINX Ingress Controller установлен +- cert-manager для автоматических SSL сертификатов (опционально) + +## Быстрый старт + +### 1. Создать namespace + +```bash +kubectl create namespace tsdev +kubectl config set-context --current --namespace=tsdev +``` + +### 2. 
Создать secret для Docker registry (если используете приватный registry) + +```bash +kubectl create secret docker-registry github-registry \ + --docker-server=ghcr.io \ + --docker-username=YOUR_USERNAME \ + --docker-password=YOUR_PAT \ + --docker-email=YOUR_EMAIL +``` + +### 3. Применить конфигурацию + +```bash +# Развернуть приложение +kubectl apply -f deployment.yaml + +# Настроить ingress +kubectl apply -f ingress.yaml +``` + +### 4. Проверить развертывание + +```bash +# Проверить pods +kubectl get pods -l app=tsdev-api + +# Проверить сервис +kubectl get svc tsdev-api + +# Проверить ingress +kubectl get ingress tsdev-api + +# Логи +kubectl logs -l app=tsdev-api -f + +# Описание pod (для debugging) +kubectl describe pod -l app=tsdev-api +``` + +## Мониторинг + +### Проверить метрики HPA + +```bash +kubectl get hpa tsdev-api-hpa +kubectl describe hpa tsdev-api-hpa +``` + +### Посмотреть events + +```bash +kubectl get events --sort-by=.metadata.creationTimestamp +``` + +### Port forwarding для локального тестирования + +```bash +kubectl port-forward svc/tsdev-api 3000:80 + +# Тестировать +curl http://localhost:3000/procedures +``` + +## Обновление приложения + +### Rolling update + +```bash +# Обновить образ +kubectl set image deployment/tsdev-api \ + api=ghcr.io/your-org/tsdev-api:v2.0.0 + +# Или применить новый манифест +kubectl apply -f deployment.yaml + +# Проверить статус rollout +kubectl rollout status deployment/tsdev-api + +# Откатить при необходимости +kubectl rollout undo deployment/tsdev-api +``` + +## Масштабирование + +### Ручное масштабирование + +```bash +kubectl scale deployment tsdev-api --replicas=5 +``` + +### Auto-scaling уже настроен через HPA + +HPA автоматически масштабирует от 2 до 10 реплик на основе CPU/Memory. 
+ +## Troubleshooting + +### Pod не запускается + +```bash +# Описание pod для деталей +kubectl describe pod + +# Логи +kubectl logs + +# Логи предыдущего контейнера (если pod перезапускался) +kubectl logs --previous + +# Exec в pod для debugging +kubectl exec -it -- sh +``` + +### Проблемы с health checks + +```bash +# Проверить endpoint напрямую +kubectl exec -it -- wget -O- http://localhost:3000/health + +# Временно отключить health checks +kubectl patch deployment tsdev-api -p '{"spec":{"template":{"spec":{"containers":[{"name":"api","livenessProbe":null,"readinessProbe":null}]}}}}' +``` + +### Проблемы с ingress + +```bash +# Проверить ingress controller логи +kubectl logs -n ingress-nginx -l app.kubernetes.io/component=controller + +# Проверить endpoints сервиса +kubectl get endpoints tsdev-api +``` + +## Secrets Management + +### Создать secrets для sensitive данных + +```bash +# Database URL +kubectl create secret generic tsdev-secrets \ + --from-literal=database-url='postgresql://user:pass@host:5432/db' + +# Добавить в deployment.yaml: +# env: +# - name: DATABASE_URL +# valueFrom: +# secretKeyRef: +# name: tsdev-secrets +# key: database-url +``` + +## Backup & Recovery + +### Backup конфигурации + +```bash +kubectl get all -o yaml > backup.yaml +kubectl get configmap tsdev-config -o yaml > configmap-backup.yaml +kubectl get secret -o yaml > secrets-backup.yaml +``` + +## Удаление + +```bash +# Удалить все ресурсы +kubectl delete -f deployment.yaml +kubectl delete -f ingress.yaml + +# Или удалить namespace целиком +kubectl delete namespace tsdev +``` + +## Production Checklist + +- [ ] Resource limits настроены +- [ ] Health checks работают +- [ ] HPA настроен +- [ ] Network policies применены +- [ ] Secrets для sensitive данных +- [ ] Logging настроен +- [ ] Monitoring/Alerting настроен +- [ ] Backup strategy определен +- [ ] SSL сертификаты настроены +- [ ] RBAC policies настроены diff --git a/examples/basic/k8s/deployment.yaml 
b/examples/basic/k8s/deployment.yaml new file mode 100644 index 0000000..9dfdc6a --- /dev/null +++ b/examples/basic/k8s/deployment.yaml @@ -0,0 +1,188 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tsdev-api + labels: + app: tsdev-api + version: v1 +spec: + replicas: 3 + selector: + matchLabels: + app: tsdev-api + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: tsdev-api + version: v1 + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "3000" + prometheus.io/path: "/metrics" + spec: + # Security context + securityContext: + runAsNonRoot: true + runAsUser: 1001 + fsGroup: 1001 + + containers: + - name: api + image: ghcr.io/your-org/tsdev-api:latest + imagePullPolicy: Always + ports: + - name: http + containerPort: 3000 + protocol: TCP + + env: + - name: NODE_ENV + value: "production" + - name: PORT + value: "3000" + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: tsdev-config + key: log_level + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "http://otel-collector:4318" + - name: OTEL_SERVICE_NAME + value: "tsdev-api" + + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 500m + memory: 512Mi + + # Liveness probe - is the app alive? + livenessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + + # Readiness probe - is the app ready to serve traffic? 
+ readinessProbe: + httpGet: + path: /ready + port: 3000 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + + # Startup probe - for slow-starting apps + startupProbe: + httpGet: + path: /health + port: 3000 + failureThreshold: 30 + periodSeconds: 10 + + # Security context for container + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Volume mounts + volumeMounts: + - name: tmp + mountPath: /tmp + - name: cache + mountPath: /app/.cache + + volumes: + - name: tmp + emptyDir: {} + - name: cache + emptyDir: {} + + # Termination grace period + terminationGracePeriodSeconds: 30 + + # Image pull secrets (if using private registry) + # imagePullSecrets: + # - name: github-registry +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tsdev-config +data: + log_level: "info" + rate_limit_window_ms: "60000" + rate_limit_max_requests: "100" +--- +apiVersion: v1 +kind: Service +metadata: + name: tsdev-api + labels: + app: tsdev-api +spec: + type: ClusterIP + selector: + app: tsdev-api + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + name: http + sessionAffinity: None +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: tsdev-api-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: tsdev-api + minReplicas: 2 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 70 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 50 + periodSeconds: 60 + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + - type: Pods + value: 2 + periodSeconds: 15 + selectPolicy: Max diff --git a/examples/basic/k8s/ingress.yaml b/examples/basic/k8s/ingress.yaml new 
file mode 100644 index 0000000..bb451d1 --- /dev/null +++ b/examples/basic/k8s/ingress.yaml @@ -0,0 +1,115 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: tsdev-api + annotations: + # TLS/SSL + cert-manager.io/cluster-issuer: "letsencrypt-prod" + + # NGINX specific + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + + # Rate limiting + nginx.ingress.kubernetes.io/rate-limit: "100" + nginx.ingress.kubernetes.io/limit-rps: "10" + + # CORS + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-methods: "GET, POST, OPTIONS" + nginx.ingress.kubernetes.io/cors-allow-origin: "*" + + # Timeouts + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-send-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "30" + + # Request size + nginx.ingress.kubernetes.io/proxy-body-size: "10m" + + # Security headers + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "X-Frame-Options: DENY"; + more_set_headers "X-Content-Type-Options: nosniff"; + more_set_headers "X-XSS-Protection: 1; mode=block"; + more_set_headers "Referrer-Policy: strict-origin-when-cross-origin"; + +spec: + ingressClassName: nginx + + tls: + - hosts: + - api.yourdomain.com + secretName: tsdev-api-tls + + rules: + - host: api.yourdomain.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: tsdev-api + port: + number: 80 +--- +# Optional: Network Policy for security +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tsdev-api-netpol +spec: + podSelector: + matchLabels: + app: tsdev-api + policyTypes: + - Ingress + - Egress + + ingress: + # Allow from ingress controller + - from: + - namespaceSelector: + matchLabels: + name: ingress-nginx + ports: + - protocol: TCP + port: 3000 + + # Allow from same namespace (for service mesh) + - from: + - podSelector: {} + ports: + - protocol: 
TCP + port: 3000 + + egress: + # Allow DNS + - to: + - namespaceSelector: + matchLabels: + name: kube-system + ports: + - protocol: UDP + port: 53 + + # Allow to OTEL collector + - to: + - podSelector: + matchLabels: + app: otel-collector + ports: + - protocol: TCP + port: 4318 + + # Allow to external services (databases, etc) + - to: + - namespaceSelector: {} + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 5432 # PostgreSQL + - protocol: TCP + port: 6379 # Redis diff --git a/examples/basic/otel-collector-config.yaml b/examples/basic/otel-collector-config.yaml new file mode 100644 index 0000000..30ea5bb --- /dev/null +++ b/examples/basic/otel-collector-config.yaml @@ -0,0 +1,59 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +processors: + batch: + timeout: 10s + send_batch_size: 1024 + + memory_limiter: + check_interval: 1s + limit_mib: 512 + spike_limit_mib: 128 + +exporters: + # Export to Jaeger + otlp/jaeger: + endpoint: jaeger:4317 + tls: + insecure: true + + # Export metrics to Prometheus + prometheus: + endpoint: "0.0.0.0:8889" + + # Log to console for debugging + logging: + loglevel: info + + # Export to stdout for debugging + debug: + verbosity: detailed + +service: + pipelines: + traces: + receivers: [otlp] + processors: [memory_limiter, batch] + exporters: [otlp/jaeger, logging] + + metrics: + receivers: [otlp] + processors: [memory_limiter, batch] + exporters: [prometheus, logging] + + logs: + receivers: [otlp] + processors: [memory_limiter, batch] + exporters: [logging] + + extensions: [health_check] + +extensions: + health_check: + endpoint: 0.0.0.0:13133 diff --git a/examples/basic/prometheus.yml b/examples/basic/prometheus.yml new file mode 100644 index 0000000..aa85542 --- /dev/null +++ b/examples/basic/prometheus.yml @@ -0,0 +1,12 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: 'otel-collector' + static_configs: + - targets: 
['otel-collector:8888'] + + - job_name: 'prometheus-metrics' + static_configs: + - targets: ['otel-collector:8889'] diff --git a/examples/basic/railway.toml b/examples/basic/railway.toml new file mode 100644 index 0000000..3d08091 --- /dev/null +++ b/examples/basic/railway.toml @@ -0,0 +1,22 @@ +# Railway.app configuration +# Deploy with: railway up + +[build] +builder = "NIXPACKS" +buildCommand = "pnpm install && pnpm --filter @tsdev/core build && pnpm --filter @tsdev/adapters build" + +[deploy] +startCommand = "node --import tsx/esm src/apps/http.ts" +healthcheckPath = "/health" +healthcheckTimeout = 100 +restartPolicyType = "ON_FAILURE" +restartPolicyMaxRetries = 10 +numReplicas = 1 + +[[services]] +name = "api" + +[services.env] +NODE_ENV = "production" +PORT = "3000" +LOG_LEVEL = "info" diff --git a/examples/basic/render.yaml b/examples/basic/render.yaml new file mode 100644 index 0000000..844a275 --- /dev/null +++ b/examples/basic/render.yaml @@ -0,0 +1,38 @@ +# Render.com configuration +# Deploy via dashboard or: render-cli deploy + +services: + - type: web + name: tsdev-api + runtime: node + region: oregon + plan: starter # free, starter, standard, pro + branch: main + + # Build + buildCommand: pnpm install && pnpm --filter @tsdev/core build && pnpm --filter @tsdev/adapters build + + # Start + startCommand: node --import tsx/esm src/apps/http.ts + + # Health check + healthCheckPath: /health + + # Environment + envVars: + - key: NODE_ENV + value: production + - key: PORT + value: 3000 + - key: LOG_LEVEL + value: info + + # Auto-deploy on push + autoDeploy: true + + # Scaling (for paid plans) + # scaling: + # minInstances: 1 + # maxInstances: 3 + # targetMemoryPercent: 80 + # targetCPUPercent: 70 diff --git a/examples/basic/serverless.yml b/examples/basic/serverless.yml new file mode 100644 index 0000000..e0e4d49 --- /dev/null +++ b/examples/basic/serverless.yml @@ -0,0 +1,200 @@ +# Serverless Framework configuration for AWS Lambda +service: tsdev-api + 
+frameworkVersion: '3' + +provider: + name: aws + runtime: nodejs20.x + region: ${opt:region, 'us-east-1'} + stage: ${opt:stage, 'dev'} + memorySize: 512 + timeout: 30 + + environment: + NODE_ENV: production + STAGE: ${self:provider.stage} + LOG_LEVEL: ${env:LOG_LEVEL, 'info'} + + iam: + role: + statements: + # CloudWatch Logs + - Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + Resource: + - 'arn:aws:logs:*:*:*' + + # X-Ray tracing + - Effect: Allow + Action: + - xray:PutTraceSegments + - xray:PutTelemetryRecords + Resource: '*' + +functions: + api: + handler: dist/lambda-handler.handler + description: tsdev API Lambda handler + events: + # HTTP API Gateway v2 (cheaper, recommended) + - httpApi: + path: /{proxy+} + method: ANY + + # GET /procedures + - httpApi: + path: /procedures + method: GET + + # Environment variables per function + environment: + FUNCTION_NAME: api + + # Reserved concurrency (optional, prevents runaway costs) + # reservedConcurrency: 10 + + # Provisioned concurrency (for low latency, costs more) + # provisionedConcurrency: 2 + + # VPC config (if accessing RDS, ElastiCache, etc) + # vpc: + # securityGroupIds: + # - sg-xxxxx + # subnetIds: + # - subnet-xxxxx + # - subnet-yyyyy + + # Separate function for workflows (if needed) + workflow: + handler: dist/workflow-lambda.handler + description: Workflow execution handler + timeout: 60 # Workflows may take longer + memorySize: 1024 + events: + - httpApi: + path: /workflow/{proxy+} + method: ANY + +# Package configuration +package: + individually: true + patterns: + # Include + - 'dist/**' + - 'node_modules/**' + # Exclude + - '!node_modules/.cache/**' + - '!node_modules/*/test/**' + - '!node_modules/*/tests/**' + - '!.git/**' + - '!.github/**' + - '!src/**' + - '!*.md' + +# Plugins +plugins: + - serverless-esbuild + - serverless-offline # For local testing + +custom: + # esbuild configuration + esbuild: + bundle: true + minify: true + sourcemap: true 
+ target: 'node20' + platform: 'node' + format: 'cjs' + external: + - '@aws-sdk/*' + exclude: + - 'aws-sdk' + + # Serverless Offline configuration (local development) + serverless-offline: + httpPort: 3000 + lambdaPort: 3002 + + # Stages configuration + stages: + dev: + memorySize: 256 + timeout: 15 + staging: + memorySize: 512 + timeout: 30 + production: + memorySize: 1024 + timeout: 30 + provisionedConcurrency: 2 + +# CloudFormation resources +resources: + Resources: + # API Gateway Custom Domain (optional) + # ApiDomainName: + # Type: AWS::ApiGatewayV2::DomainName + # Properties: + # DomainName: api.yourdomain.com + # DomainNameConfigurations: + # - EndpointType: REGIONAL + # CertificateArn: arn:aws:acm:region:account:certificate/xxxxx + + # CloudWatch Log Group with retention + ApiLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: /aws/lambda/${self:service}-${self:provider.stage}-api + RetentionInDays: 14 + + WorkflowLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: /aws/lambda/${self:service}-${self:provider.stage}-workflow + RetentionInDays: 14 + + # CloudWatch Alarms + ApiErrorsAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmDescription: Alert when API errors exceed threshold + MetricName: Errors + Namespace: AWS/Lambda + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 10 + ComparisonOperator: GreaterThanThreshold + Dimensions: + - Name: FunctionName + Value: ${self:service}-${self:provider.stage}-api + + ApiThrottlesAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmDescription: Alert when API throttles occur + MetricName: Throttles + Namespace: AWS/Lambda + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 5 + ComparisonOperator: GreaterThanThreshold + Dimensions: + - Name: FunctionName + Value: ${self:service}-${self:provider.stage}-api + + Outputs: + ApiUrl: + Description: API Gateway endpoint URL + Value: + Fn::Sub: 
https://${HttpApi}.execute-api.${AWS::Region}.amazonaws.com + + ApiId: + Description: API Gateway ID + Value: + Ref: HttpApi diff --git a/examples/basic/src/lambda-handler.ts b/examples/basic/src/lambda-handler.ts new file mode 100644 index 0000000..1d2f288 --- /dev/null +++ b/examples/basic/src/lambda-handler.ts @@ -0,0 +1,170 @@ +#!/usr/bin/env node +/** + * AWS Lambda handler for tsdev API + */ + +import { collectRegistry, executeProcedure, createExecutionContext } from '@tsdev/core'; +import type { APIGatewayProxyEvent, APIGatewayProxyResult, Context } from 'aws-lambda'; +import type { Registry } from '@tsdev/core'; + +// Cold start optimization: cache registry between invocations +let registry: Registry | null = null; + +/** + * Initialize registry (lazy loading) + */ +async function initRegistry(): Promise<Registry> { + if (!registry) { + console.log('🔍 Cold start: Collecting procedures...'); + const startTime = Date.now(); + + registry = await collectRegistry('./handlers'); + + const duration = Date.now() - startTime; + console.log(`✅ Registry initialized in ${duration}ms with ${registry.size} procedures`); + } + return registry; +} + +/** + * Parse request body + */ +function parseBody(body: string | null): unknown { + if (!body) return {}; + + try { + return JSON.parse(body); + } catch (error) { + throw new Error('Invalid JSON in request body'); + } +} + +/** + * Create success response + */ +function successResponse(data: unknown, statusCode = 200): APIGatewayProxyResult { + return { + statusCode, + headers: { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Methods': 'GET,POST,OPTIONS', + 'Access-Control-Allow-Headers': 'Content-Type', + }, + body: JSON.stringify(data), + }; +} + +/** + * Create error response + */ +function errorResponse(message: string, statusCode = 500): APIGatewayProxyResult { + return { + statusCode, + headers: { + 'Content-Type': 'application/json', + 'Access-Control-Allow-Origin': '*', + }, 
+ body: JSON.stringify({ error: message }), + }; +} + +/** + * Main Lambda handler + */ +export async function handler( + event: APIGatewayProxyEvent, + context: Context +): Promise<APIGatewayProxyResult> { + const startTime = Date.now(); + + try { + // Initialize registry + const reg = await initRegistry(); + + const path = event.path || event.rawPath || '/'; + const method = event.httpMethod || event.requestContext?.http?.method || 'GET'; + + console.log(`📥 ${method} ${path}`, { + requestId: context.awsRequestId, + functionVersion: context.functionVersion, + }); + + // CORS preflight + if (method === 'OPTIONS') { + return successResponse({ message: 'OK' }, 200); + } + + // Health check + if (path === '/health' && method === 'GET') { + return successResponse({ + status: 'healthy', + timestamp: new Date().toISOString(), + procedures: reg.size, + coldStart: Date.now() - startTime < 1000, + version: process.env.npm_package_version || 'unknown', + }); + } + + // Introspection: GET /procedures + if (path === '/procedures' && method === 'GET') { + const procedures = Array.from(reg.entries()).map(([name, proc]) => ({ + name, + description: proc.contract.description, + metadata: proc.contract.metadata, + })); + + return successResponse({ procedures }); + } + + // RPC execution: POST /rpc/:procedureName + if (path.startsWith('/rpc/') && method === 'POST') { + const procedureName = path.slice(5); + const procedure = reg.get(procedureName); + + if (!procedure) { + return errorResponse(`Procedure '${procedureName}' not found`, 404); + } + + // Parse input + const input = parseBody(event.body); + + // Create execution context + const execContext = createExecutionContext({ + transport: 'lambda', + requestId: context.awsRequestId, + functionName: context.functionName, + functionVersion: context.functionVersion, + awsRequestId: event.requestContext.requestId, + sourceIp: event.requestContext.identity?.sourceIp, + userAgent: event.requestContext.identity?.userAgent, + }); + + // Execute procedure + const 
result = await executeProcedure(procedure, input, execContext); + + const duration = Date.now() - startTime; + console.log(`✅ Procedure executed in ${duration}ms`); + + return successResponse(result); + } + + // Not found + return errorResponse('Not found', 404); + + } catch (error) { + console.error('❌ Lambda error:', error); + + // Zod validation errors + if (error && typeof error === 'object' && 'issues' in error) { + return errorResponse( + 'Validation error: ' + JSON.stringify(error), + 400 + ); + } + + // Generic errors + const message = error instanceof Error ? error.message : 'Internal server error'; + return errorResponse(message, 500); + } +} diff --git a/packages/adapters/src/http.ts b/packages/adapters/src/http.ts index fb2477b..2e90348 100644 --- a/packages/adapters/src/http.ts +++ b/packages/adapters/src/http.ts @@ -21,12 +21,51 @@ export function createHttpServer(registry: Registry, port = 3000) { return; } - // Health check - if (req.url === "/health" && req.method === "GET") { - res.writeHead(200, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ status: "ok" })); - return; - } + // Health check (liveness probe) + if (req.url === "/health" && req.method === "GET") { + res.writeHead(200, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ + status: "healthy", + timestamp: new Date().toISOString(), + uptime: process.uptime(), + version: process.env.npm_package_version || "unknown", + })); + return; + } + + // Readiness check (readiness probe) + if (req.url === "/ready" && req.method === "GET") { + // Check if registry is ready + const isReady = registry.size > 0; + const statusCode = isReady ? 
200 : 503; + + res.writeHead(statusCode, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ + ready: isReady, + procedures: registry.size, + timestamp: new Date().toISOString(), + })); + return; + } + + // Detailed status (for monitoring) + if (req.url === "/status" && req.method === "GET") { + res.writeHead(200, { "Content-Type": "application/json" }); + res.end(JSON.stringify({ + status: "operational", + timestamp: new Date().toISOString(), + uptime: process.uptime(), + memory: process.memoryUsage(), + version: process.env.npm_package_version || "unknown", + nodeVersion: process.version, + procedures: { + total: registry.size, + names: Array.from(registry.keys()), + }, + environment: process.env.NODE_ENV || "development", + })); + return; + } // List all procedures (introspection endpoint) if (req.url === "/procedures" && req.method === "GET") {