diff --git a/.env.db.example b/.env.db.example
index c71aa94..777bf45 100644
--- a/.env.db.example
+++ b/.env.db.example
@@ -1,4 +1,3 @@
-DB_NAME=nginx_waf
-DB_USER=postgres
-DB_PASSWORD=postgres
-DB_PORT=5432
\ No newline at end of file
+# Database Configuration for SQLite
+# The database file will be created at apps/api/prisma/nginx_waf.db
+DATABASE_URL="file:./nginx_waf.db"
\ No newline at end of file
diff --git a/.env.example b/.env.example
index 4f96529..61c1bf2 100644
--- a/.env.example
+++ b/.env.example
@@ -1,9 +1,6 @@
 # Database Configuration
-DATABASE_URL="postgresql://postgres:postgres@localhost:5432/nginx_waf?schema=public"
+DATABASE_URL="file:./nginx_waf.db"
-DB_NAME=nginx_waf
-DB_USER=postgres
-DB_PASSWORD=postgres
 
 # Server Configuration
 PORT=3001
 NODE_ENV=production
diff --git a/.gitignore b/.gitignore
index 594bbbf..81fc5ba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,14 @@ package-lock.json
 bun.lockb
 yarn.lock
 
+# Database files (SQLite)
+*.db
+*.db-journal
+*.db-shm
+*.db-wal
+apps/api/prisma/*.db
+apps/api/prisma/*.db-*
+
 # Editor directories and files
 .vscode/*
 !.vscode/extensions.json
diff --git a/README.md b/README.md
index 4622797..b8c6c0f 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@ Nginx WAF - Advanced Nginx Management Platform offers full support for major ope
 - 🛡️ **Access Control Lists (ACL)** - IP whitelist/blacklist, GeoIP, User-Agent filtering
 - 📋 **Activity Logging** - Comprehensive audit trail
 - 🔔 **Smart Alerts** - Email/Telegram notifications with custom conditions
-- 💾 **Database Management** - PostgreSQL with Prisma ORM
+- 💾 **Database Management** - SQLite with Prisma ORM (no Docker required)
 - 🎨 **Modern UI** - React + TypeScript + ShadCN UI + Tailwind CSS
 
 ## 📦 Quick Start
 
@@ -40,6 +40,7 @@ Nginx WAF - Advanced Nginx Management Platform offers full support for major ope
 | **New Server (Production)** | `./scripts/deploy.sh` | Full installation of Nginx + ModSecurity + Backend + Frontend with systemd services |
 | **Development/Testing** | `./scripts/quickstart.sh` | Quick run in dev mode (no Nginx installation, no root required) |
 | **Upgrade New Version** | `./scripts/update.sh` | Full update to new version |
+| **Migrate PostgreSQL → SQLite** | `./scripts/migrate-postgres-to-sqlite.sh` | Migrate existing PostgreSQL data to SQLite (see [Migration Guide](docs/MIGRATION_POSTGRES_TO_SQLITE.md)) |
 
 | Use Case | Port | Description |
 |----------|--------|-------------|
 
@@ -69,6 +70,32 @@ git pull
 bash scripts/update.sh
 ```
 
+### 🔄 Migrating from PostgreSQL to SQLite
+
+If you have an existing installation using PostgreSQL and want to migrate to SQLite:
+
+```bash
+# Navigate to your nginx-love directory
+cd nginx-love
+
+# Run the migration script (requires root)
+sudo bash scripts/migrate-postgres-to-sqlite.sh
+```
+
+**What the migration script does:**
+- ✅ Exports all data from PostgreSQL (users, domains, SSL certificates, rules, etc.)
+- ✅ Creates a new SQLite database
+- ✅ Imports all data with proper type conversions
+- ✅ Backs up your original configuration
+- ✅ Provides rollback instructions if needed
+
+**After migration:**
+1. Restart services: `sudo systemctl restart nginx-love-backend nginx-love-frontend`
+2. Verify all data is present in the web interface (a quick row-count check is sketched below)
+3. Optionally remove PostgreSQL: See [Migration Guide](docs/MIGRATION_POSTGRES_TO_SQLITE.md)
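+
+As a post-migration sanity check, you can compare row counts in the new SQLite file against what PostgreSQL reported before the migration. A minimal sketch using the `sqlite3` CLI (the path and table names below assume the default install layout and the Prisma schema shipped in this repo):
+
+```bash
+# Sketch: adjust DB to wherever your installation keeps the database file
+DB=apps/api/prisma/nginx_waf.db
+for table in users domains ssl_certificates modsec_rules alert_rules; do
+  echo -n "$table: "
+  sqlite3 "$DB" "SELECT COUNT(*) FROM $table;"
+done
+```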
+
+📖 **Full Migration Guide**: [docs/MIGRATION_POSTGRES_TO_SQLITE.md](docs/MIGRATION_POSTGRES_TO_SQLITE.md)
+
 ### 🖥️ Production Deployment (Docker container)
 
 ## Environment Setup
 
 Before running the application, you need to set up your environment variables:
 
 | `JWT_REFRESH_SECRET` | Secret key for JWT refresh tokens | `your-random-secret-key-32-chars` | ✅ Yes |
 | `SESSION_SECRET` | Secret key for session management | `your-random-secret-key-32-chars` | ✅ Yes |
 | `VITE_API_URL` | Backend API URL for frontend | `http://YOUR_SERVER_IP:3001/api` | ✅ Yes |
-| `DB_NAME` | PostgreSQL database name | `nginx_waf` | ✅ Yes |
-| `DB_USER` | PostgreSQL database user | `postgres` | ✅ Yes |
-| `DB_PASSWORD` | PostgreSQL database password | `postgres` | ✅ Yes |
-| `POSTGRES_INITDB_ARGS` | PostgreSQL initialization arguments | `--encoding=UTF-8 --lc-collate=C --lc-ctype=C` | ⚠️ Optional |
 | `CORS_ORIGIN` | Allowed CORS origins (comma-separated) | `http://YOUR_SERVER_IP:8080,http://localhost:8080` | ✅ Yes |
 
 **Security Note**: Generate strong random secrets using:
 ```bash
 openssl rand -base64 32
 ```
 
+**Database Note**: SQLite is used by default (file-based, no separate server needed). The database file will be created automatically at `apps/api/prisma/nginx_waf.db`.
+
 2. Edit the `.env` file and configure the necessary environment variables according to your local setup.
 
@@ -155,10 +180,9 @@ Currently, automatic upgrades are **not supported** for Docker Compose deploymen
 The script will **automatically install everything**:
 - ✅ Node.js 20.x (if not present)
 - ✅ pnpm 8.15.0 (if not present)
-- ✅ Docker + Docker Compose (if not present)
-- ✅ PostgreSQL 15 container (auto-generated credentials)
 - ✅ Nginx + ModSecurity + OWASP CRS
 - ✅ Backend API + Frontend (production build)
+- ✅ SQLite database (file-based, no Docker required)
 - ✅ Systemd services with auto-start
 - ✅ CORS configuration with Public IP
 
@@ -177,7 +201,7 @@ cd nginx-love
 
 This will:
 - Install dependencies
-- Start PostgreSQL in Docker (optional)
+- Create SQLite database file automatically
 - Run database migrations and seeding
 - Start backend on http://localhost:3001
 - Start frontend on http://localhost:8080 (dev mode)
 
@@ -289,7 +313,7 @@ Password: admin123
 - **API Documentation**: OpenAPI/Swagger
 
 ### Infrastructure
-- **Database**: PostgreSQL 15 (Docker)
+- **Database**: SQLite 3 (file-based, no server required)
 - **Web Server**: Nginx + ModSecurity 3.x
 - **SSL**: Let's Encrypt (acme.sh) + Manual certificates
 - **WAF**: OWASP ModSecurity Core Rule Set (CRS)
 
@@ -317,9 +341,9 @@ Password: admin123
         ▼
 ┌─────────────────┐
 │                 │
-│   PostgreSQL    │
-│    Database     │
-│   Port: 5432    │
+│    SQLite DB    │
+│  (File-based)   │
+│  nginx_waf.db   │
 └─────────────────┘
 ```
 
@@ -336,17 +360,16 @@ Password: admin123
 - **Alert System**: Configurable alerts with multi-channel notifications
 - **Activity Logs**: Comprehensive audit trail
 
+**Database**: SQLite 3 (file-based at `apps/api/prisma/nginx_waf.db`)
+- No Docker required
+- No PostgreSQL installation needed
+- Simple backup: just copy the `.db` file
+
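+One caveat on the copy-based backup: a plain `cp` taken while the backend is writing can capture a mid-transaction state. A safer sketch uses SQLite's online backup through the `sqlite3` CLI (same default path assumed as above):
+
+```bash
+# Consistent snapshot even while the service is running
+sqlite3 apps/api/prisma/nginx_waf.db ".backup 'nginx_waf.db.backup'"
+```
+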
 ## 🔧 Service Management
 
 ### Production (systemd services)
 
 ```bash
-# PostgreSQL Database
-docker start nginx-love-postgres
-docker stop nginx-love-postgres
-docker restart nginx-love-postgres
-docker logs -f nginx-love-postgres
-
 # Backend API Service
 sudo systemctl start nginx-love-backend
 sudo systemctl stop nginx-love-backend
@@ -368,6 +391,24 @@ sudo nginx -t          # Test configuration
 sudo nginx -s reload   # Reload configuration
 ```
 
+### Database Management
+
+```bash
+# Backup database
+sudo cp /path/to/apps/api/prisma/nginx_waf.db /path/to/backup/nginx_waf.db.backup
+
+# Restore database
+sudo cp /path/to/backup/nginx_waf.db.backup /path/to/apps/api/prisma/nginx_waf.db
+sudo systemctl restart nginx-love-backend
+
+# View database (using sqlite3 CLI)
+sqlite3 /path/to/apps/api/prisma/nginx_waf.db
+# .tables                        # List all tables
+# .schema users                  # Show table structure
+# SELECT * FROM users LIMIT 5;   # Query data
+# .quit                          # Exit
+```
+
 ### Development Environment
 
 ```bash
@@ -406,7 +447,6 @@ tail -f /var/log/nginx-love-backend.log    # Backend log file
 tail -f /var/log/nginx-love-frontend.log   # Frontend log file
 
 # System logs
-docker logs -f nginx-love-postgres         # Database logs
 tail -f /var/log/nginx/access.log          # Nginx access logs
 tail -f /var/log/nginx/error.log           # Nginx error logs
 tail -f /var/log/modsec_audit.log          # ModSecurity audit logs
@@ -426,9 +466,6 @@ tail -f /tmp/frontend.log     # Frontend development logs
 cd apps/api && pnpm dev       # Shows real-time backend logs
 cd apps/web && pnpm dev       # Shows real-time frontend logs + HMR
 
-# Database logs
-docker logs -f nginx-love-postgres
-
 # Combined log viewing
 multitail /tmp/backend.log /tmp/frontend.log
 ```
 
@@ -440,7 +477,6 @@ multitail /tmp/backend.log /tmp/frontend.log
 
 # Check what's using ports
 sudo netstat -tulnp | grep :3001   # Backend port
 sudo netstat -tulnp | grep :8080   # Frontend port (dev & prod)
-sudo netstat -tulnp | grep :5432   # PostgreSQL port
 
 # Kill processes on specific ports
 sudo lsof -ti:3001 | xargs kill -9   # Backend
 
@@ -454,22 +490,28 @@ sudo fuser -k 8080/tcp
 ```
 
 ### Database Issues
 
 ```bash
-# Check PostgreSQL container
-docker ps | grep postgres
-docker container inspect nginx-love-postgres
+# Check database file
+ls -lh apps/api/prisma/nginx_waf.db
+sqlite3 apps/api/prisma/nginx_waf.db ".tables"
 
-# Check database connectivity
+# Reset database (WARNING: deletes all data)
 cd apps/api
-pnpm prisma db push --force-reset   # Reset database
-pnpm prisma generate                # Regenerate client
-pnpm prisma migrate reset           # Reset migrations
+rm -f prisma/nginx_waf.db prisma/nginx_waf.db-journal
+pnpm prisma migrate dev   # Recreate and migrate
+pnpm prisma:seed          # Reseed with initial data
+
+# Regenerate Prisma client
+pnpm prisma generate
 
 # Check environment variables
 cat apps/api/.env | grep DATABASE_URL
 cd apps/api && node -e "console.log(process.env.DATABASE_URL)"
 
-# Direct database connection test
-docker exec -it nginx-love-postgres psql -U nginx_love_user -d nginx_love_db
+# Backup database
+cp apps/api/prisma/nginx_waf.db apps/api/prisma/nginx_waf.db.backup-$(date +%Y%m%d)
+
+# Restore database
+cp apps/api/prisma/nginx_waf.db.backup-YYYYMMDD apps/api/prisma/nginx_waf.db
 ```
 
 ### Nginx Configuration Issues
 
@@ -497,13 +539,9 @@ free -h
 
 # Check application memory usage
 ps aux | grep node | grep -v grep
-docker stats nginx-love-postgres
 
-# Database performance
-docker exec -it nginx-love-postgres psql -U nginx_love_user -d nginx_love_db -c "
-SELECT schemaname,tablename,attname,n_distinct,correlation
-FROM pg_stats WHERE tablename IN ('domains','users','performance_metrics');
-"
+# Database file size
+du -h apps/api/prisma/nginx_waf.db
 ```
 
 ### Common Error Solutions
 
@@ -517,11 +555,17 @@ sudo kill -9 
 
 **Error: "Database connection failed"**
 ```bash
-# Restart PostgreSQL container
-docker restart nginx-love-postgres
-# Wait 10 seconds for startup
-sleep 10
-cd apps/api && pnpm dev
+# Check if database file exists
+ls -l apps/api/prisma/nginx_waf.db
+
+# Check DATABASE_URL environment variable
+cat apps/api/.env | grep DATABASE_URL
+
+# Recreate database if corrupted
+cd apps/api
+rm -f prisma/nginx_waf.db prisma/nginx_waf.db-journal
+pnpm prisma migrate dev
+pnpm prisma:seed
 ```
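+
+If the file exists but connection errors persist, the database itself may be damaged. A minimal integrity check, sketched with the `sqlite3` CLI (default path assumed; this entry is a suggested addition to the troubleshooting list):
+
+```bash
+sqlite3 apps/api/prisma/nginx_waf.db "PRAGMA integrity_check;"
+# Prints "ok" for a healthy file; anything else means restore from a backup
+```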
 
 **Error: "ModSecurity failed to load"**
 ```
 
@@ -552,9 +596,9 @@ cd nginx-love
 pnpm install
 
 # 3. Setup database
-docker-compose -f docker-compose.db.yml up -d
 cd apps/api
 cp .env.example .env  # Configure environment variables
+# Edit .env and set DATABASE_URL="file:./nginx_waf.db"
 pnpm prisma:migrate   # Run database migrations
 pnpm prisma:seed      # Seed initial data
 
diff --git a/apps/api/prisma/migrations/20250930140957_initial_setup/migration.sql b/apps/api/prisma/migrations/20250930140957_initial_setup/migration.sql
deleted file mode 100644
index c32bfdb..0000000
--- a/apps/api/prisma/migrations/20250930140957_initial_setup/migration.sql
+++ /dev/null
@@ -1,149 +0,0 @@
--- CreateEnum
-CREATE TYPE "UserRole" AS ENUM ('admin', 'moderator', 'viewer');
-
--- CreateEnum
-CREATE TYPE "UserStatus" AS ENUM ('active', 'inactive', 'suspended');
-
--- CreateEnum
-CREATE TYPE "ActivityType" AS ENUM ('login', 'logout', 'config_change', 'user_action', 'security');
-
--- CreateTable
-CREATE TABLE "users" (
-    "id" TEXT NOT NULL,
-    "username" TEXT NOT NULL,
-    "email" TEXT NOT NULL,
-    "password" TEXT NOT NULL,
-    "fullName" TEXT NOT NULL,
-    "role" "UserRole" NOT NULL DEFAULT 'viewer',
-    "status" "UserStatus" NOT NULL DEFAULT 'active',
-    "avatar" TEXT,
-    "phone" TEXT,
-    "timezone" TEXT NOT NULL DEFAULT 'Asia/Ho_Chi_Minh',
-    "language" TEXT NOT NULL DEFAULT 'en',
-    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    "updatedAt" TIMESTAMP(3) NOT NULL,
-    "lastLogin" TIMESTAMP(3),
-
-    CONSTRAINT "users_pkey" PRIMARY KEY ("id")
-);
-
--- CreateTable
-CREATE TABLE "user_profiles" (
-    "id" TEXT NOT NULL,
-    "userId" TEXT NOT NULL,
-    "bio" TEXT,
-    "location" TEXT,
-    "website" TEXT,
-    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    "updatedAt" TIMESTAMP(3) NOT NULL,
-
-    CONSTRAINT "user_profiles_pkey" PRIMARY KEY ("id")
-);
-
--- CreateTable
-CREATE TABLE "two_factor_auth" (
-    "id" TEXT NOT NULL,
-    "userId" TEXT NOT NULL,
-    "enabled" BOOLEAN NOT NULL DEFAULT false,
-    "method" TEXT NOT NULL DEFAULT 'totp',
-    "secret" TEXT,
-    "backupCodes" TEXT[],
-    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    "updatedAt" TIMESTAMP(3) NOT NULL,
-
-    CONSTRAINT "two_factor_auth_pkey" PRIMARY KEY ("id")
-);
-
--- CreateTable
-CREATE TABLE "activity_logs" (
-    "id" TEXT NOT NULL,
-    "userId" TEXT NOT NULL,
-    "action" TEXT NOT NULL,
-    "type" "ActivityType" NOT NULL,
-    "ip" TEXT NOT NULL,
-    "userAgent" TEXT NOT NULL,
-    "details" TEXT,
-    "success" BOOLEAN NOT NULL DEFAULT true,
-    "timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-
-    CONSTRAINT "activity_logs_pkey" PRIMARY KEY ("id")
-);
-
--- CreateTable
-CREATE TABLE "refresh_tokens" (
-    "id" TEXT NOT NULL,
-    "userId" TEXT NOT NULL,
-    "token" TEXT NOT NULL,
-    "expiresAt" TIMESTAMP(3) NOT NULL,
-    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    "revokedAt" TIMESTAMP(3),
-
-    CONSTRAINT "refresh_tokens_pkey" PRIMARY KEY ("id")
-);
-
--- CreateTable
-CREATE TABLE "user_sessions" (
-    "id" TEXT NOT NULL,
-    "userId" TEXT NOT NULL,
-    "sessionId" TEXT NOT NULL,
-    "ip" TEXT NOT NULL,
-    "userAgent" TEXT NOT NULL,
-    "device" TEXT,
-    "location" TEXT,
-    "lastActive" TIMESTAMP(3) NOT NULL DEFAULT
CURRENT_TIMESTAMP, - "expiresAt" TIMESTAMP(3) NOT NULL, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "user_sessions_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "users_username_key" ON "users"("username"); - --- CreateIndex -CREATE UNIQUE INDEX "users_email_key" ON "users"("email"); - --- CreateIndex -CREATE UNIQUE INDEX "user_profiles_userId_key" ON "user_profiles"("userId"); - --- CreateIndex -CREATE UNIQUE INDEX "two_factor_auth_userId_key" ON "two_factor_auth"("userId"); - --- CreateIndex -CREATE INDEX "activity_logs_userId_timestamp_idx" ON "activity_logs"("userId", "timestamp"); - --- CreateIndex -CREATE INDEX "activity_logs_type_timestamp_idx" ON "activity_logs"("type", "timestamp"); - --- CreateIndex -CREATE UNIQUE INDEX "refresh_tokens_token_key" ON "refresh_tokens"("token"); - --- CreateIndex -CREATE INDEX "refresh_tokens_userId_idx" ON "refresh_tokens"("userId"); - --- CreateIndex -CREATE INDEX "refresh_tokens_token_idx" ON "refresh_tokens"("token"); - --- CreateIndex -CREATE UNIQUE INDEX "user_sessions_sessionId_key" ON "user_sessions"("sessionId"); - --- CreateIndex -CREATE INDEX "user_sessions_userId_idx" ON "user_sessions"("userId"); - --- CreateIndex -CREATE INDEX "user_sessions_sessionId_idx" ON "user_sessions"("sessionId"); - --- AddForeignKey -ALTER TABLE "user_profiles" ADD CONSTRAINT "user_profiles_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "two_factor_auth" ADD CONSTRAINT "two_factor_auth_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "activity_logs" ADD CONSTRAINT "activity_logs_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "refresh_tokens" ADD CONSTRAINT "refresh_tokens_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "user_sessions" ADD CONSTRAINT "user_sessions_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20250930155130_add_domain_management/migration.sql b/apps/api/prisma/migrations/20250930155130_add_domain_management/migration.sql deleted file mode 100644 index 2bee513..0000000 --- a/apps/api/prisma/migrations/20250930155130_add_domain_management/migration.sql +++ /dev/null @@ -1,167 +0,0 @@ --- CreateEnum -CREATE TYPE "DomainStatus" AS ENUM ('active', 'inactive', 'error'); - --- CreateEnum -CREATE TYPE "UpstreamStatus" AS ENUM ('up', 'down', 'checking'); - --- CreateEnum -CREATE TYPE "LoadBalancerAlgorithm" AS ENUM ('round_robin', 'least_conn', 'ip_hash'); - --- CreateEnum -CREATE TYPE "SSLStatus" AS ENUM ('valid', 'expiring', 'expired'); - --- CreateTable -CREATE TABLE "domains" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "status" "DomainStatus" NOT NULL DEFAULT 'inactive', - "sslEnabled" BOOLEAN NOT NULL DEFAULT false, - "sslExpiry" TIMESTAMP(3), - "modsecEnabled" BOOLEAN NOT NULL DEFAULT true, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "domains_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "upstreams" ( - "id" TEXT NOT NULL, - "domainId" TEXT NOT NULL, - "host" TEXT NOT NULL, - "port" INTEGER NOT NULL, - "weight" INTEGER NOT NULL DEFAULT 1, - "maxFails" INTEGER NOT NULL DEFAULT 3, - 
"failTimeout" INTEGER NOT NULL DEFAULT 10, - "status" "UpstreamStatus" NOT NULL DEFAULT 'checking', - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "upstreams_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "load_balancer_configs" ( - "id" TEXT NOT NULL, - "domainId" TEXT NOT NULL, - "algorithm" "LoadBalancerAlgorithm" NOT NULL DEFAULT 'round_robin', - "healthCheckEnabled" BOOLEAN NOT NULL DEFAULT true, - "healthCheckInterval" INTEGER NOT NULL DEFAULT 30, - "healthCheckTimeout" INTEGER NOT NULL DEFAULT 5, - "healthCheckPath" TEXT NOT NULL DEFAULT '/', - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "load_balancer_configs_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "ssl_certificates" ( - "id" TEXT NOT NULL, - "domainId" TEXT NOT NULL, - "commonName" TEXT NOT NULL, - "sans" TEXT[], - "issuer" TEXT NOT NULL, - "certificate" TEXT NOT NULL, - "privateKey" TEXT NOT NULL, - "chain" TEXT, - "validFrom" TIMESTAMP(3) NOT NULL, - "validTo" TIMESTAMP(3) NOT NULL, - "autoRenew" BOOLEAN NOT NULL DEFAULT true, - "status" "SSLStatus" NOT NULL DEFAULT 'valid', - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "ssl_certificates_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "modsec_rules" ( - "id" TEXT NOT NULL, - "domainId" TEXT, - "name" TEXT NOT NULL, - "category" TEXT NOT NULL, - "ruleContent" TEXT NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "description" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "modsec_rules_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "nginx_configs" ( - "id" TEXT NOT NULL, - "configType" TEXT NOT NULL, - "name" TEXT NOT NULL, - "content" TEXT NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "nginx_configs_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "installation_status" ( - "id" TEXT NOT NULL, - "component" TEXT NOT NULL, - "status" TEXT NOT NULL, - "step" TEXT, - "message" TEXT, - "progress" INTEGER NOT NULL DEFAULT 0, - "startedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "completedAt" TIMESTAMP(3), - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "installation_status_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "domains_name_key" ON "domains"("name"); - --- CreateIndex -CREATE INDEX "domains_name_idx" ON "domains"("name"); - --- CreateIndex -CREATE INDEX "domains_status_idx" ON "domains"("status"); - --- CreateIndex -CREATE INDEX "upstreams_domainId_idx" ON "upstreams"("domainId"); - --- CreateIndex -CREATE UNIQUE INDEX "load_balancer_configs_domainId_key" ON "load_balancer_configs"("domainId"); - --- CreateIndex -CREATE UNIQUE INDEX "ssl_certificates_domainId_key" ON "ssl_certificates"("domainId"); - --- CreateIndex -CREATE INDEX "ssl_certificates_domainId_idx" ON "ssl_certificates"("domainId"); - --- CreateIndex -CREATE INDEX "ssl_certificates_validTo_idx" ON "ssl_certificates"("validTo"); - --- CreateIndex -CREATE INDEX "modsec_rules_domainId_idx" ON "modsec_rules"("domainId"); - --- CreateIndex -CREATE INDEX "modsec_rules_category_idx" ON "modsec_rules"("category"); - --- CreateIndex -CREATE INDEX "nginx_configs_configType_idx" ON "nginx_configs"("configType"); - 
--- CreateIndex -CREATE UNIQUE INDEX "installation_status_component_key" ON "installation_status"("component"); - --- AddForeignKey -ALTER TABLE "upstreams" ADD CONSTRAINT "upstreams_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "load_balancer_configs" ADD CONSTRAINT "load_balancer_configs_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "ssl_certificates" ADD CONSTRAINT "ssl_certificates_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "modsec_rules" ADD CONSTRAINT "modsec_rules_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20250930165732_add_upstream_https_support/migration.sql b/apps/api/prisma/migrations/20250930165732_add_upstream_https_support/migration.sql deleted file mode 100644 index 2febca3..0000000 --- a/apps/api/prisma/migrations/20250930165732_add_upstream_https_support/migration.sql +++ /dev/null @@ -1,3 +0,0 @@ --- AlterTable -ALTER TABLE "upstreams" ADD COLUMN "protocol" TEXT NOT NULL DEFAULT 'http', -ADD COLUMN "sslVerify" BOOLEAN NOT NULL DEFAULT true; diff --git a/apps/api/prisma/migrations/20251001083220_separate_crs_and_custom_rules/migration.sql b/apps/api/prisma/migrations/20251001083220_separate_crs_and_custom_rules/migration.sql deleted file mode 100644 index ef08f7d..0000000 --- a/apps/api/prisma/migrations/20251001083220_separate_crs_and_custom_rules/migration.sql +++ /dev/null @@ -1,63 +0,0 @@ -/* - Warnings: - - - You are about to drop the `modsec_rules` table. If the table is not empty, all the data it contains will be lost. 
- -*/ --- DropForeignKey -ALTER TABLE "modsec_rules" DROP CONSTRAINT "modsec_rules_domainId_fkey"; - --- DropTable -DROP TABLE "modsec_rules"; - --- CreateTable -CREATE TABLE "modsec_crs_rules" ( - "id" TEXT NOT NULL, - "domainId" TEXT, - "ruleFile" TEXT NOT NULL, - "name" TEXT NOT NULL, - "category" TEXT NOT NULL, - "description" TEXT, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "paranoia" INTEGER NOT NULL DEFAULT 1, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "modsec_crs_rules_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "modsec_custom_rules" ( - "id" TEXT NOT NULL, - "domainId" TEXT, - "name" TEXT NOT NULL, - "category" TEXT NOT NULL, - "ruleContent" TEXT NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "description" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "modsec_custom_rules_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE INDEX "modsec_crs_rules_domainId_idx" ON "modsec_crs_rules"("domainId"); - --- CreateIndex -CREATE INDEX "modsec_crs_rules_category_idx" ON "modsec_crs_rules"("category"); - --- CreateIndex -CREATE UNIQUE INDEX "modsec_crs_rules_ruleFile_domainId_key" ON "modsec_crs_rules"("ruleFile", "domainId"); - --- CreateIndex -CREATE INDEX "modsec_custom_rules_domainId_idx" ON "modsec_custom_rules"("domainId"); - --- CreateIndex -CREATE INDEX "modsec_custom_rules_category_idx" ON "modsec_custom_rules"("category"); - --- AddForeignKey -ALTER TABLE "modsec_crs_rules" ADD CONSTRAINT "modsec_crs_rules_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "modsec_custom_rules" ADD CONSTRAINT "modsec_custom_rules_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20251001083755_separate_crs_and_custom_rules/migration.sql b/apps/api/prisma/migrations/20251001083755_separate_crs_and_custom_rules/migration.sql deleted file mode 100644 index af5102c..0000000 --- a/apps/api/prisma/migrations/20251001083755_separate_crs_and_custom_rules/migration.sql +++ /dev/null @@ -1 +0,0 @@ --- This is an empty migration. \ No newline at end of file diff --git a/apps/api/prisma/migrations/20251001163237_add_performance_metrics/migration.sql b/apps/api/prisma/migrations/20251001163237_add_performance_metrics/migration.sql deleted file mode 100644 index 7ec49f8..0000000 --- a/apps/api/prisma/migrations/20251001163237_add_performance_metrics/migration.sql +++ /dev/null @@ -1,142 +0,0 @@ -/* - Warnings: - - - You are about to drop the `modsec_custom_rules` table. If the table is not empty, all the data it contains will be lost. 
- -*/ --- CreateEnum -CREATE TYPE "NotificationChannelType" AS ENUM ('email', 'telegram'); - --- CreateEnum -CREATE TYPE "AlertSeverity" AS ENUM ('critical', 'warning', 'info'); - --- CreateEnum -CREATE TYPE "AclType" AS ENUM ('whitelist', 'blacklist'); - --- CreateEnum -CREATE TYPE "AclField" AS ENUM ('ip', 'geoip', 'user_agent', 'url', 'method', 'header'); - --- CreateEnum -CREATE TYPE "AclOperator" AS ENUM ('equals', 'contains', 'regex'); - --- CreateEnum -CREATE TYPE "AclAction" AS ENUM ('allow', 'deny', 'challenge'); - --- DropForeignKey -ALTER TABLE "modsec_custom_rules" DROP CONSTRAINT "modsec_custom_rules_domainId_fkey"; - --- DropTable -DROP TABLE "modsec_custom_rules"; - --- CreateTable -CREATE TABLE "modsec_rules" ( - "id" TEXT NOT NULL, - "domainId" TEXT, - "name" TEXT NOT NULL, - "category" TEXT NOT NULL, - "ruleContent" TEXT NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "description" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "modsec_rules_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "notification_channels" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "type" "NotificationChannelType" NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "config" JSONB NOT NULL, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "notification_channels_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "alert_rules" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "condition" TEXT NOT NULL, - "threshold" INTEGER NOT NULL, - "severity" "AlertSeverity" NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "checkInterval" INTEGER NOT NULL DEFAULT 60, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "alert_rules_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "alert_rule_channels" ( - "id" TEXT NOT NULL, - "ruleId" TEXT NOT NULL, - "channelId" TEXT NOT NULL, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "alert_rule_channels_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "acl_rules" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "type" "AclType" NOT NULL, - "conditionField" "AclField" NOT NULL, - "conditionOperator" "AclOperator" NOT NULL, - "conditionValue" TEXT NOT NULL, - "action" "AclAction" NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "acl_rules_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "performance_metrics" ( - "id" TEXT NOT NULL, - "domain" TEXT NOT NULL, - "timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "responseTime" DOUBLE PRECISION NOT NULL, - "throughput" DOUBLE PRECISION NOT NULL, - "errorRate" DOUBLE PRECISION NOT NULL, - "requestCount" INTEGER NOT NULL, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "performance_metrics_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE INDEX "modsec_rules_domainId_idx" ON "modsec_rules"("domainId"); - --- CreateIndex -CREATE INDEX "modsec_rules_category_idx" ON "modsec_rules"("category"); - --- CreateIndex -CREATE INDEX "alert_rule_channels_ruleId_idx" ON "alert_rule_channels"("ruleId"); - --- CreateIndex -CREATE INDEX "alert_rule_channels_channelId_idx" ON "alert_rule_channels"("channelId"); - --- CreateIndex -CREATE UNIQUE INDEX 
"alert_rule_channels_ruleId_channelId_key" ON "alert_rule_channels"("ruleId", "channelId"); - --- CreateIndex -CREATE INDEX "performance_metrics_domain_timestamp_idx" ON "performance_metrics"("domain", "timestamp"); - --- CreateIndex -CREATE INDEX "performance_metrics_timestamp_idx" ON "performance_metrics"("timestamp"); - --- AddForeignKey -ALTER TABLE "modsec_rules" ADD CONSTRAINT "modsec_rules_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "alert_rule_channels" ADD CONSTRAINT "alert_rule_channels_ruleId_fkey" FOREIGN KEY ("ruleId") REFERENCES "alert_rules"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "alert_rule_channels" ADD CONSTRAINT "alert_rule_channels_channelId_fkey" FOREIGN KEY ("channelId") REFERENCES "notification_channels"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20251002030304_add_alert_history/migration.sql b/apps/api/prisma/migrations/20251002030304_add_alert_history/migration.sql deleted file mode 100644 index 65beee2..0000000 --- a/apps/api/prisma/migrations/20251002030304_add_alert_history/migration.sql +++ /dev/null @@ -1,23 +0,0 @@ --- CreateTable -CREATE TABLE "alert_history" ( - "id" TEXT NOT NULL, - "severity" "AlertSeverity" NOT NULL, - "message" TEXT NOT NULL, - "source" TEXT NOT NULL, - "acknowledged" BOOLEAN NOT NULL DEFAULT false, - "acknowledgedBy" TEXT, - "acknowledgedAt" TIMESTAMP(3), - "timestamp" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "alert_history_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE INDEX "alert_history_severity_idx" ON "alert_history"("severity"); - --- CreateIndex -CREATE INDEX "alert_history_acknowledged_idx" ON "alert_history"("acknowledged"); - --- CreateIndex -CREATE INDEX "alert_history_timestamp_idx" ON "alert_history"("timestamp"); diff --git a/apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql b/apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql deleted file mode 100644 index 12fe8b1..0000000 --- a/apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql +++ /dev/null @@ -1,41 +0,0 @@ --- CreateEnum -CREATE TYPE "BackupStatus" AS ENUM ('success', 'failed', 'running', 'pending'); - --- CreateTable -CREATE TABLE "backup_schedules" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "schedule" TEXT NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "lastRun" TIMESTAMP(3), - "nextRun" TIMESTAMP(3), - "status" "BackupStatus" NOT NULL DEFAULT 'pending', - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "backup_schedules_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "backup_files" ( - "id" TEXT NOT NULL, - "scheduleId" TEXT, - "filename" TEXT NOT NULL, - "filepath" TEXT NOT NULL, - "size" BIGINT NOT NULL, - "status" "BackupStatus" NOT NULL DEFAULT 'success', - "type" TEXT NOT NULL DEFAULT 'full', - "metadata" JSONB, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "backup_files_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE INDEX "backup_files_scheduleId_idx" ON "backup_files"("scheduleId"); - --- CreateIndex -CREATE INDEX "backup_files_createdAt_idx" ON "backup_files"("createdAt"); - --- AddForeignKey -ALTER TABLE "backup_files" ADD CONSTRAINT "backup_files_scheduleId_fkey" FOREIGN KEY ("scheduleId") 
REFERENCES "backup_schedules"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql b/apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql deleted file mode 100644 index 232cb55..0000000 --- a/apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql +++ /dev/null @@ -1,85 +0,0 @@ --- CreateEnum -CREATE TYPE "SlaveNodeStatus" AS ENUM ('online', 'offline', 'syncing', 'error'); - --- CreateEnum -CREATE TYPE "SyncLogStatus" AS ENUM ('success', 'failed', 'partial', 'running'); - --- CreateEnum -CREATE TYPE "SyncLogType" AS ENUM ('full_sync', 'incremental_sync', 'health_check'); - --- CreateTable -CREATE TABLE "slave_nodes" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "host" TEXT NOT NULL, - "port" INTEGER NOT NULL DEFAULT 3001, - "apiKey" TEXT NOT NULL, - "status" "SlaveNodeStatus" NOT NULL DEFAULT 'offline', - "lastSeen" TIMESTAMP(3), - "version" TEXT, - "syncEnabled" BOOLEAN NOT NULL DEFAULT true, - "syncInterval" INTEGER NOT NULL DEFAULT 60, - "configHash" TEXT, - "lastSyncAt" TIMESTAMP(3), - "latency" INTEGER, - "cpuUsage" DOUBLE PRECISION, - "memoryUsage" DOUBLE PRECISION, - "diskUsage" DOUBLE PRECISION, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "slave_nodes_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "sync_logs" ( - "id" TEXT NOT NULL, - "nodeId" TEXT NOT NULL, - "type" "SyncLogType" NOT NULL, - "status" "SyncLogStatus" NOT NULL DEFAULT 'running', - "configHash" TEXT, - "changesCount" INTEGER, - "errorMessage" TEXT, - "startedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "completedAt" TIMESTAMP(3), - "duration" INTEGER, - - CONSTRAINT "sync_logs_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "config_versions" ( - "id" TEXT NOT NULL, - "version" SERIAL NOT NULL, - "configHash" TEXT NOT NULL, - "configData" JSONB NOT NULL, - "createdBy" TEXT, - "description" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "config_versions_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "slave_nodes_name_key" ON "slave_nodes"("name"); - --- CreateIndex -CREATE UNIQUE INDEX "slave_nodes_apiKey_key" ON "slave_nodes"("apiKey"); - --- CreateIndex -CREATE INDEX "slave_nodes_status_idx" ON "slave_nodes"("status"); - --- CreateIndex -CREATE INDEX "slave_nodes_lastSeen_idx" ON "slave_nodes"("lastSeen"); - --- CreateIndex -CREATE INDEX "sync_logs_nodeId_startedAt_idx" ON "sync_logs"("nodeId", "startedAt"); - --- CreateIndex -CREATE UNIQUE INDEX "config_versions_configHash_key" ON "config_versions"("configHash"); - --- CreateIndex -CREATE INDEX "config_versions_createdAt_idx" ON "config_versions"("createdAt"); - --- AddForeignKey -ALTER TABLE "sync_logs" ADD CONSTRAINT "sync_logs_nodeId_fkey" FOREIGN KEY ("nodeId") REFERENCES "slave_nodes"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql b/apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql deleted file mode 100644 index 760d762..0000000 --- a/apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql +++ /dev/null @@ -1,22 +0,0 @@ --- CreateEnum -CREATE TYPE "NodeMode" AS ENUM ('master', 'slave'); - --- CreateTable -CREATE TABLE "system_configs" ( - "id" TEXT NOT NULL, - "nodeMode" "NodeMode" NOT NULL DEFAULT 
'master', - "masterApiEnabled" BOOLEAN NOT NULL DEFAULT true, - "slaveApiEnabled" BOOLEAN NOT NULL DEFAULT false, - "masterHost" TEXT, - "masterPort" INTEGER, - "masterApiKey" TEXT, - "syncInterval" INTEGER NOT NULL DEFAULT 60, - "lastSyncHash" TEXT, - "connected" BOOLEAN NOT NULL DEFAULT false, - "lastConnectedAt" TIMESTAMP(3), - "connectionError" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "system_configs_pkey" PRIMARY KEY ("id") -); diff --git a/apps/api/prisma/migrations/20251007145737_make_activity_log_user_id_optional/migration.sql b/apps/api/prisma/migrations/20251007145737_make_activity_log_user_id_optional/migration.sql deleted file mode 100644 index f1ed743..0000000 --- a/apps/api/prisma/migrations/20251007145737_make_activity_log_user_id_optional/migration.sql +++ /dev/null @@ -1,5 +0,0 @@ --- AlterEnum -ALTER TYPE "ActivityType" ADD VALUE 'system'; - --- AlterTable -ALTER TABLE "activity_logs" ALTER COLUMN "userId" DROP NOT NULL; diff --git a/apps/api/prisma/migrations/20251008110124_add_first_login_flag/migration.sql b/apps/api/prisma/migrations/20251008110124_add_first_login_flag/migration.sql deleted file mode 100644 index 64a6721..0000000 --- a/apps/api/prisma/migrations/20251008110124_add_first_login_flag/migration.sql +++ /dev/null @@ -1,2 +0,0 @@ --- AlterTable -ALTER TABLE "users" ADD COLUMN "isFirstLogin" BOOLEAN NOT NULL DEFAULT true; diff --git a/apps/api/prisma/migrations/20251009081000_add_real_ip_config/migration.sql b/apps/api/prisma/migrations/20251009081000_add_real_ip_config/migration.sql deleted file mode 100644 index af5102c..0000000 --- a/apps/api/prisma/migrations/20251009081000_add_real_ip_config/migration.sql +++ /dev/null @@ -1 +0,0 @@ --- This is an empty migration. 
\ No newline at end of file diff --git a/apps/api/prisma/migrations/20251009081041_add_real_ip_config/migration.sql b/apps/api/prisma/migrations/20251009081041_add_real_ip_config/migration.sql deleted file mode 100644 index 894f049..0000000 --- a/apps/api/prisma/migrations/20251009081041_add_real_ip_config/migration.sql +++ /dev/null @@ -1,4 +0,0 @@ --- AlterTable -ALTER TABLE "domains" ADD COLUMN "realIpCloudflare" BOOLEAN NOT NULL DEFAULT false, -ADD COLUMN "realIpCustomCidrs" TEXT[] DEFAULT ARRAY[]::TEXT[], -ADD COLUMN "realIpEnabled" BOOLEAN NOT NULL DEFAULT false; diff --git a/apps/api/prisma/migrations/20251011072500_add_network_load_balancer/migration.sql b/apps/api/prisma/migrations/20251011072500_add_network_load_balancer/migration.sql deleted file mode 100644 index 77ca4db..0000000 --- a/apps/api/prisma/migrations/20251011072500_add_network_load_balancer/migration.sql +++ /dev/null @@ -1,100 +0,0 @@ --- CreateEnum -CREATE TYPE "NLBStatus" AS ENUM ('active', 'inactive', 'error'); - --- CreateEnum -CREATE TYPE "NLBProtocol" AS ENUM ('tcp', 'udp', 'tcp_udp'); - --- CreateEnum -CREATE TYPE "NLBAlgorithm" AS ENUM ('round_robin', 'least_conn', 'ip_hash', 'hash'); - --- CreateEnum -CREATE TYPE "NLBUpstreamStatus" AS ENUM ('up', 'down', 'checking'); - --- CreateTable -CREATE TABLE "network_load_balancers" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "description" TEXT, - "port" INTEGER NOT NULL, - "protocol" "NLBProtocol" NOT NULL DEFAULT 'tcp', - "algorithm" "NLBAlgorithm" NOT NULL DEFAULT 'round_robin', - "status" "NLBStatus" NOT NULL DEFAULT 'inactive', - "enabled" BOOLEAN NOT NULL DEFAULT true, - "proxyTimeout" INTEGER NOT NULL DEFAULT 3, - "proxyConnectTimeout" INTEGER NOT NULL DEFAULT 1, - "proxyNextUpstream" BOOLEAN NOT NULL DEFAULT true, - "proxyNextUpstreamTimeout" INTEGER NOT NULL DEFAULT 0, - "proxyNextUpstreamTries" INTEGER NOT NULL DEFAULT 0, - "healthCheckEnabled" BOOLEAN NOT NULL DEFAULT true, - "healthCheckInterval" INTEGER NOT NULL DEFAULT 10, - "healthCheckTimeout" INTEGER NOT NULL DEFAULT 5, - "healthCheckRises" INTEGER NOT NULL DEFAULT 2, - "healthCheckFalls" INTEGER NOT NULL DEFAULT 3, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "network_load_balancers_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "nlb_upstreams" ( - "id" TEXT NOT NULL, - "nlbId" TEXT NOT NULL, - "host" TEXT NOT NULL, - "port" INTEGER NOT NULL, - "weight" INTEGER NOT NULL DEFAULT 1, - "maxFails" INTEGER NOT NULL DEFAULT 3, - "failTimeout" INTEGER NOT NULL DEFAULT 10, - "maxConns" INTEGER NOT NULL DEFAULT 0, - "backup" BOOLEAN NOT NULL DEFAULT false, - "down" BOOLEAN NOT NULL DEFAULT false, - "status" "NLBUpstreamStatus" NOT NULL DEFAULT 'checking', - "lastCheck" TIMESTAMP(3), - "lastError" TEXT, - "responseTime" DOUBLE PRECISION, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "nlb_upstreams_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "nlb_health_checks" ( - "id" TEXT NOT NULL, - "nlbId" TEXT NOT NULL, - "upstreamHost" TEXT NOT NULL, - "upstreamPort" INTEGER NOT NULL, - "status" "NLBUpstreamStatus" NOT NULL, - "responseTime" DOUBLE PRECISION, - "error" TEXT, - "checkedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - - CONSTRAINT "nlb_health_checks_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "network_load_balancers_name_key" ON "network_load_balancers"("name"); - --- CreateIndex -CREATE INDEX 
"network_load_balancers_status_idx" ON "network_load_balancers"("status"); - --- CreateIndex -CREATE INDEX "network_load_balancers_port_idx" ON "network_load_balancers"("port"); - --- CreateIndex -CREATE INDEX "nlb_upstreams_nlbId_idx" ON "nlb_upstreams"("nlbId"); - --- CreateIndex -CREATE INDEX "nlb_upstreams_status_idx" ON "nlb_upstreams"("status"); - --- CreateIndex -CREATE INDEX "nlb_health_checks_nlbId_checkedAt_idx" ON "nlb_health_checks"("nlbId", "checkedAt"); - --- CreateIndex -CREATE INDEX "nlb_health_checks_upstreamHost_upstreamPort_idx" ON "nlb_health_checks"("upstreamHost", "upstreamPort"); - --- AddForeignKey -ALTER TABLE "nlb_upstreams" ADD CONSTRAINT "nlb_upstreams_nlbId_fkey" FOREIGN KEY ("nlbId") REFERENCES "network_load_balancers"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "nlb_health_checks" ADD CONSTRAINT "nlb_health_checks_nlbId_fkey" FOREIGN KEY ("nlbId") REFERENCES "network_load_balancers"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20251014043307_add_domain_advanced_settings/migration.sql b/apps/api/prisma/migrations/20251014043307_add_domain_advanced_settings/migration.sql deleted file mode 100644 index 82b0e96..0000000 --- a/apps/api/prisma/migrations/20251014043307_add_domain_advanced_settings/migration.sql +++ /dev/null @@ -1,5 +0,0 @@ --- AlterTable -ALTER TABLE "domains" ADD COLUMN "customLocations" JSONB, -ADD COLUMN "grpcEnabled" BOOLEAN NOT NULL DEFAULT false, -ADD COLUMN "hstsEnabled" BOOLEAN NOT NULL DEFAULT false, -ADD COLUMN "http2Enabled" BOOLEAN NOT NULL DEFAULT true; diff --git a/apps/api/prisma/migrations/20251014102338_add_access_lists_management/migration.sql b/apps/api/prisma/migrations/20251014102338_add_access_lists_management/migration.sql deleted file mode 100644 index 1ade394..0000000 --- a/apps/api/prisma/migrations/20251014102338_add_access_lists_management/migration.sql +++ /dev/null @@ -1,74 +0,0 @@ --- CreateEnum -CREATE TYPE "AccessListType" AS ENUM ('ip_whitelist', 'http_basic_auth', 'combined'); - --- CreateTable -CREATE TABLE "access_lists" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "description" TEXT, - "type" "AccessListType" NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "allowedIps" TEXT[] DEFAULT ARRAY[]::TEXT[], - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "access_lists_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "access_list_auth_users" ( - "id" TEXT NOT NULL, - "accessListId" TEXT NOT NULL, - "username" TEXT NOT NULL, - "passwordHash" TEXT NOT NULL, - "description" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "access_list_auth_users_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "access_list_domains" ( - "id" TEXT NOT NULL, - "accessListId" TEXT NOT NULL, - "domainId" TEXT NOT NULL, - "enabled" BOOLEAN NOT NULL DEFAULT true, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "access_list_domains_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "access_lists_name_key" ON "access_lists"("name"); - --- CreateIndex -CREATE INDEX "access_lists_type_idx" ON "access_lists"("type"); - --- CreateIndex -CREATE INDEX "access_lists_enabled_idx" ON "access_lists"("enabled"); - --- CreateIndex -CREATE INDEX "access_list_auth_users_accessListId_idx" ON "access_list_auth_users"("accessListId"); - --- 
CreateIndex -CREATE UNIQUE INDEX "access_list_auth_users_accessListId_username_key" ON "access_list_auth_users"("accessListId", "username"); - --- CreateIndex -CREATE INDEX "access_list_domains_accessListId_idx" ON "access_list_domains"("accessListId"); - --- CreateIndex -CREATE INDEX "access_list_domains_domainId_idx" ON "access_list_domains"("domainId"); - --- CreateIndex -CREATE UNIQUE INDEX "access_list_domains_accessListId_domainId_key" ON "access_list_domains"("accessListId", "domainId"); - --- AddForeignKey -ALTER TABLE "access_list_auth_users" ADD CONSTRAINT "access_list_auth_users_accessListId_fkey" FOREIGN KEY ("accessListId") REFERENCES "access_lists"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "access_list_domains" ADD CONSTRAINT "access_list_domains_accessListId_fkey" FOREIGN KEY ("accessListId") REFERENCES "access_lists"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "access_list_domains" ADD CONSTRAINT "access_list_domains_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/migrations/20251101000000_add_ssl_certificate_details/migration.sql b/apps/api/prisma/migrations/20251101000000_add_ssl_certificate_details/migration.sql deleted file mode 100644 index a05bfef..0000000 --- a/apps/api/prisma/migrations/20251101000000_add_ssl_certificate_details/migration.sql +++ /dev/null @@ -1,12 +0,0 @@ --- AlterTable: Add new fields to ssl_certificates table for detailed certificate information -ALTER TABLE "ssl_certificates" -ADD COLUMN "subject" TEXT, -ADD COLUMN "subjectDetails" JSONB, -ADD COLUMN "issuerDetails" JSONB, -ADD COLUMN "serialNumber" TEXT; - --- Add comments to explain the new fields -COMMENT ON COLUMN "ssl_certificates"."subject" IS 'Full subject string from certificate (e.g., CN=example.com, O=Example, C=US)'; -COMMENT ON COLUMN "ssl_certificates"."subjectDetails" IS 'Parsed subject details as JSON: {commonName, organization, country}'; -COMMENT ON COLUMN "ssl_certificates"."issuerDetails" IS 'Parsed issuer details as JSON: {commonName, organization, country}'; -COMMENT ON COLUMN "ssl_certificates"."serialNumber" IS 'Certificate serial number'; diff --git a/apps/api/prisma/migrations/20251104025114_add_client_max_body_size/migration.sql b/apps/api/prisma/migrations/20251104025114_add_client_max_body_size/migration.sql deleted file mode 100644 index 778901e..0000000 --- a/apps/api/prisma/migrations/20251104025114_add_client_max_body_size/migration.sql +++ /dev/null @@ -1,2 +0,0 @@ --- AlterTable -ALTER TABLE "domains" ADD COLUMN "clientMaxBodySize" INTEGER DEFAULT 100; diff --git a/apps/api/prisma/migrations/20251104110839_init_sqlite/migration.sql b/apps/api/prisma/migrations/20251104110839_init_sqlite/migration.sql new file mode 100644 index 0000000..57d9f87 --- /dev/null +++ b/apps/api/prisma/migrations/20251104110839_init_sqlite/migration.sql @@ -0,0 +1,647 @@ +-- CreateTable +CREATE TABLE "users" ( + "id" TEXT NOT NULL PRIMARY KEY, + "username" TEXT NOT NULL, + "email" TEXT NOT NULL, + "password" TEXT NOT NULL, + "fullName" TEXT NOT NULL, + "role" TEXT NOT NULL DEFAULT 'viewer', + "status" TEXT NOT NULL DEFAULT 'active', + "avatar" TEXT, + "phone" TEXT, + "timezone" TEXT NOT NULL DEFAULT 'Asia/Ho_Chi_Minh', + "language" TEXT NOT NULL DEFAULT 'en', + "isFirstLogin" BOOLEAN NOT NULL DEFAULT true, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + "lastLogin" DATETIME +); + +-- 
CreateTable +CREATE TABLE "user_profiles" ( + "id" TEXT NOT NULL PRIMARY KEY, + "userId" TEXT NOT NULL, + "bio" TEXT, + "location" TEXT, + "website" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "user_profiles_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "two_factor_auth" ( + "id" TEXT NOT NULL PRIMARY KEY, + "userId" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT false, + "method" TEXT NOT NULL DEFAULT 'totp', + "secret" TEXT, + "backupCodes" TEXT DEFAULT '[]', + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "two_factor_auth_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "activity_logs" ( + "id" TEXT NOT NULL PRIMARY KEY, + "userId" TEXT, + "action" TEXT NOT NULL, + "type" TEXT NOT NULL, + "ip" TEXT NOT NULL, + "userAgent" TEXT NOT NULL, + "details" TEXT, + "success" BOOLEAN NOT NULL DEFAULT true, + "timestamp" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT "activity_logs_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "refresh_tokens" ( + "id" TEXT NOT NULL PRIMARY KEY, + "userId" TEXT NOT NULL, + "token" TEXT NOT NULL, + "expiresAt" DATETIME NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "revokedAt" DATETIME, + CONSTRAINT "refresh_tokens_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "user_sessions" ( + "id" TEXT NOT NULL PRIMARY KEY, + "userId" TEXT NOT NULL, + "sessionId" TEXT NOT NULL, + "ip" TEXT NOT NULL, + "userAgent" TEXT NOT NULL, + "device" TEXT, + "location" TEXT, + "lastActive" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "expiresAt" DATETIME NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT "user_sessions_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "domains" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "status" TEXT NOT NULL DEFAULT 'inactive', + "sslEnabled" BOOLEAN NOT NULL DEFAULT false, + "sslExpiry" DATETIME, + "modsecEnabled" BOOLEAN NOT NULL DEFAULT true, + "realIpEnabled" BOOLEAN NOT NULL DEFAULT false, + "realIpCloudflare" BOOLEAN NOT NULL DEFAULT false, + "realIpCustomCidrs" TEXT NOT NULL DEFAULT '', + "hstsEnabled" BOOLEAN NOT NULL DEFAULT false, + "http2Enabled" BOOLEAN NOT NULL DEFAULT true, + "grpcEnabled" BOOLEAN NOT NULL DEFAULT false, + "clientMaxBodySize" INTEGER DEFAULT 100, + "customLocations" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "upstreams" ( + "id" TEXT NOT NULL PRIMARY KEY, + "domainId" TEXT NOT NULL, + "host" TEXT NOT NULL, + "port" INTEGER NOT NULL, + "protocol" TEXT NOT NULL DEFAULT 'http', + "sslVerify" BOOLEAN NOT NULL DEFAULT true, + "weight" INTEGER NOT NULL DEFAULT 1, + "maxFails" INTEGER NOT NULL DEFAULT 3, + "failTimeout" INTEGER NOT NULL DEFAULT 10, + "status" TEXT NOT NULL DEFAULT 'checking', + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "upstreams_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains" ("id") ON DELETE CASCADE ON UPDATE 
CASCADE +); + +-- CreateTable +CREATE TABLE "load_balancer_configs" ( + "id" TEXT NOT NULL PRIMARY KEY, + "domainId" TEXT NOT NULL, + "algorithm" TEXT NOT NULL DEFAULT 'round_robin', + "healthCheckEnabled" BOOLEAN NOT NULL DEFAULT true, + "healthCheckInterval" INTEGER NOT NULL DEFAULT 30, + "healthCheckTimeout" INTEGER NOT NULL DEFAULT 5, + "healthCheckPath" TEXT NOT NULL DEFAULT '/', + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "load_balancer_configs_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "ssl_certificates" ( + "id" TEXT NOT NULL PRIMARY KEY, + "domainId" TEXT NOT NULL, + "commonName" TEXT NOT NULL, + "sans" TEXT NOT NULL, + "issuer" TEXT NOT NULL, + "subject" TEXT, + "certificate" TEXT NOT NULL, + "privateKey" TEXT NOT NULL, + "chain" TEXT, + "subjectDetails" TEXT, + "issuerDetails" TEXT, + "serialNumber" TEXT, + "validFrom" DATETIME NOT NULL, + "validTo" DATETIME NOT NULL, + "autoRenew" BOOLEAN NOT NULL DEFAULT true, + "status" TEXT NOT NULL DEFAULT 'valid', + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "ssl_certificates_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "modsec_crs_rules" ( + "id" TEXT NOT NULL PRIMARY KEY, + "domainId" TEXT, + "ruleFile" TEXT NOT NULL, + "name" TEXT NOT NULL, + "category" TEXT NOT NULL, + "description" TEXT, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "paranoia" INTEGER NOT NULL DEFAULT 1, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "modsec_crs_rules_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "modsec_rules" ( + "id" TEXT NOT NULL PRIMARY KEY, + "domainId" TEXT, + "name" TEXT NOT NULL, + "category" TEXT NOT NULL, + "ruleContent" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "description" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "modsec_rules_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "nginx_configs" ( + "id" TEXT NOT NULL PRIMARY KEY, + "configType" TEXT NOT NULL, + "name" TEXT NOT NULL, + "content" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "installation_status" ( + "id" TEXT NOT NULL PRIMARY KEY, + "component" TEXT NOT NULL, + "status" TEXT NOT NULL, + "step" TEXT, + "message" TEXT, + "progress" INTEGER NOT NULL DEFAULT 0, + "startedAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "completedAt" DATETIME, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "notification_channels" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "type" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "config" TEXT NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "alert_rules" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "condition" TEXT NOT NULL, + "threshold" INTEGER NOT NULL, + "severity" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + 
"checkInterval" INTEGER NOT NULL DEFAULT 60, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "alert_rule_channels" ( + "id" TEXT NOT NULL PRIMARY KEY, + "ruleId" TEXT NOT NULL, + "channelId" TEXT NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT "alert_rule_channels_ruleId_fkey" FOREIGN KEY ("ruleId") REFERENCES "alert_rules" ("id") ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT "alert_rule_channels_channelId_fkey" FOREIGN KEY ("channelId") REFERENCES "notification_channels" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "alert_history" ( + "id" TEXT NOT NULL PRIMARY KEY, + "severity" TEXT NOT NULL, + "message" TEXT NOT NULL, + "source" TEXT NOT NULL, + "acknowledged" BOOLEAN NOT NULL DEFAULT false, + "acknowledgedBy" TEXT, + "acknowledgedAt" DATETIME, + "timestamp" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- CreateTable +CREATE TABLE "acl_rules" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "type" TEXT NOT NULL, + "conditionField" TEXT NOT NULL, + "conditionOperator" TEXT NOT NULL, + "conditionValue" TEXT NOT NULL, + "action" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "access_lists" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "description" TEXT, + "type" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "allowedIps" TEXT NOT NULL DEFAULT '', + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "access_list_auth_users" ( + "id" TEXT NOT NULL PRIMARY KEY, + "accessListId" TEXT NOT NULL, + "username" TEXT NOT NULL, + "passwordHash" TEXT NOT NULL, + "description" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "access_list_auth_users_accessListId_fkey" FOREIGN KEY ("accessListId") REFERENCES "access_lists" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "access_list_domains" ( + "id" TEXT NOT NULL PRIMARY KEY, + "accessListId" TEXT NOT NULL, + "domainId" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "access_list_domains_accessListId_fkey" FOREIGN KEY ("accessListId") REFERENCES "access_lists" ("id") ON DELETE CASCADE ON UPDATE CASCADE, + CONSTRAINT "access_list_domains_domainId_fkey" FOREIGN KEY ("domainId") REFERENCES "domains" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "performance_metrics" ( + "id" TEXT NOT NULL PRIMARY KEY, + "domain" TEXT NOT NULL, + "timestamp" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "responseTime" REAL NOT NULL, + "throughput" REAL NOT NULL, + "errorRate" REAL NOT NULL, + "requestCount" INTEGER NOT NULL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- CreateTable +CREATE TABLE "backup_schedules" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "schedule" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "lastRun" DATETIME, + "nextRun" DATETIME, + "status" TEXT NOT NULL DEFAULT 'pending', + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE 
"backup_files" ( + "id" TEXT NOT NULL PRIMARY KEY, + "scheduleId" TEXT, + "filename" TEXT NOT NULL, + "filepath" TEXT NOT NULL, + "size" INTEGER NOT NULL, + "status" TEXT NOT NULL DEFAULT 'success', + "type" TEXT NOT NULL DEFAULT 'full', + "metadata" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT "backup_files_scheduleId_fkey" FOREIGN KEY ("scheduleId") REFERENCES "backup_schedules" ("id") ON DELETE SET NULL ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "slave_nodes" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "host" TEXT NOT NULL, + "port" INTEGER NOT NULL DEFAULT 3001, + "apiKey" TEXT NOT NULL, + "status" TEXT NOT NULL DEFAULT 'offline', + "lastSeen" DATETIME, + "version" TEXT, + "syncEnabled" BOOLEAN NOT NULL DEFAULT true, + "syncInterval" INTEGER NOT NULL DEFAULT 60, + "configHash" TEXT, + "lastSyncAt" DATETIME, + "latency" INTEGER, + "cpuUsage" REAL, + "memoryUsage" REAL, + "diskUsage" REAL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "system_configs" ( + "id" TEXT NOT NULL PRIMARY KEY, + "nodeMode" TEXT NOT NULL DEFAULT 'master', + "masterApiEnabled" BOOLEAN NOT NULL DEFAULT true, + "slaveApiEnabled" BOOLEAN NOT NULL DEFAULT false, + "masterHost" TEXT, + "masterPort" INTEGER, + "masterApiKey" TEXT, + "syncInterval" INTEGER NOT NULL DEFAULT 60, + "lastSyncHash" TEXT, + "connected" BOOLEAN NOT NULL DEFAULT false, + "lastConnectedAt" DATETIME, + "connectionError" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "sync_logs" ( + "id" TEXT NOT NULL PRIMARY KEY, + "nodeId" TEXT NOT NULL, + "type" TEXT NOT NULL, + "status" TEXT NOT NULL DEFAULT 'running', + "configHash" TEXT, + "changesCount" INTEGER, + "errorMessage" TEXT, + "startedAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "completedAt" DATETIME, + "duration" INTEGER, + CONSTRAINT "sync_logs_nodeId_fkey" FOREIGN KEY ("nodeId") REFERENCES "slave_nodes" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "config_versions" ( + "id" TEXT NOT NULL PRIMARY KEY, + "version" INTEGER NOT NULL DEFAULT 1, + "configHash" TEXT NOT NULL, + "configData" TEXT NOT NULL, + "createdBy" TEXT, + "description" TEXT, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- CreateTable +CREATE TABLE "network_load_balancers" ( + "id" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "description" TEXT, + "port" INTEGER NOT NULL, + "protocol" TEXT NOT NULL DEFAULT 'tcp', + "algorithm" TEXT NOT NULL DEFAULT 'round_robin', + "status" TEXT NOT NULL DEFAULT 'inactive', + "enabled" BOOLEAN NOT NULL DEFAULT true, + "proxyTimeout" INTEGER NOT NULL DEFAULT 3, + "proxyConnectTimeout" INTEGER NOT NULL DEFAULT 1, + "proxyNextUpstream" BOOLEAN NOT NULL DEFAULT true, + "proxyNextUpstreamTimeout" INTEGER NOT NULL DEFAULT 0, + "proxyNextUpstreamTries" INTEGER NOT NULL DEFAULT 0, + "healthCheckEnabled" BOOLEAN NOT NULL DEFAULT true, + "healthCheckInterval" INTEGER NOT NULL DEFAULT 10, + "healthCheckTimeout" INTEGER NOT NULL DEFAULT 5, + "healthCheckRises" INTEGER NOT NULL DEFAULT 2, + "healthCheckFalls" INTEGER NOT NULL DEFAULT 3, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL +); + +-- CreateTable +CREATE TABLE "nlb_upstreams" ( + "id" TEXT NOT NULL PRIMARY KEY, + "nlbId" TEXT NOT NULL, + "host" TEXT NOT NULL, + "port" INTEGER NOT NULL, + "weight" INTEGER NOT NULL 
DEFAULT 1, + "maxFails" INTEGER NOT NULL DEFAULT 3, + "failTimeout" INTEGER NOT NULL DEFAULT 10, + "maxConns" INTEGER NOT NULL DEFAULT 0, + "backup" BOOLEAN NOT NULL DEFAULT false, + "down" BOOLEAN NOT NULL DEFAULT false, + "status" TEXT NOT NULL DEFAULT 'checking', + "lastCheck" DATETIME, + "lastError" TEXT, + "responseTime" REAL, + "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" DATETIME NOT NULL, + CONSTRAINT "nlb_upstreams_nlbId_fkey" FOREIGN KEY ("nlbId") REFERENCES "network_load_balancers" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateTable +CREATE TABLE "nlb_health_checks" ( + "id" TEXT NOT NULL PRIMARY KEY, + "nlbId" TEXT NOT NULL, + "upstreamHost" TEXT NOT NULL, + "upstreamPort" INTEGER NOT NULL, + "status" TEXT NOT NULL, + "responseTime" REAL, + "error" TEXT, + "checkedAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT "nlb_health_checks_nlbId_fkey" FOREIGN KEY ("nlbId") REFERENCES "network_load_balancers" ("id") ON DELETE CASCADE ON UPDATE CASCADE +); + +-- CreateIndex +CREATE UNIQUE INDEX "users_username_key" ON "users"("username"); + +-- CreateIndex +CREATE UNIQUE INDEX "users_email_key" ON "users"("email"); + +-- CreateIndex +CREATE UNIQUE INDEX "user_profiles_userId_key" ON "user_profiles"("userId"); + +-- CreateIndex +CREATE UNIQUE INDEX "two_factor_auth_userId_key" ON "two_factor_auth"("userId"); + +-- CreateIndex +CREATE INDEX "activity_logs_userId_timestamp_idx" ON "activity_logs"("userId", "timestamp"); + +-- CreateIndex +CREATE INDEX "activity_logs_type_timestamp_idx" ON "activity_logs"("type", "timestamp"); + +-- CreateIndex +CREATE UNIQUE INDEX "refresh_tokens_token_key" ON "refresh_tokens"("token"); + +-- CreateIndex +CREATE INDEX "refresh_tokens_userId_idx" ON "refresh_tokens"("userId"); + +-- CreateIndex +CREATE INDEX "refresh_tokens_token_idx" ON "refresh_tokens"("token"); + +-- CreateIndex +CREATE UNIQUE INDEX "user_sessions_sessionId_key" ON "user_sessions"("sessionId"); + +-- CreateIndex +CREATE INDEX "user_sessions_userId_idx" ON "user_sessions"("userId"); + +-- CreateIndex +CREATE INDEX "user_sessions_sessionId_idx" ON "user_sessions"("sessionId"); + +-- CreateIndex +CREATE UNIQUE INDEX "domains_name_key" ON "domains"("name"); + +-- CreateIndex +CREATE INDEX "domains_name_idx" ON "domains"("name"); + +-- CreateIndex +CREATE INDEX "domains_status_idx" ON "domains"("status"); + +-- CreateIndex +CREATE INDEX "upstreams_domainId_idx" ON "upstreams"("domainId"); + +-- CreateIndex +CREATE UNIQUE INDEX "load_balancer_configs_domainId_key" ON "load_balancer_configs"("domainId"); + +-- CreateIndex +CREATE UNIQUE INDEX "ssl_certificates_domainId_key" ON "ssl_certificates"("domainId"); + +-- CreateIndex +CREATE INDEX "ssl_certificates_domainId_idx" ON "ssl_certificates"("domainId"); + +-- CreateIndex +CREATE INDEX "ssl_certificates_validTo_idx" ON "ssl_certificates"("validTo"); + +-- CreateIndex +CREATE INDEX "modsec_crs_rules_domainId_idx" ON "modsec_crs_rules"("domainId"); + +-- CreateIndex +CREATE INDEX "modsec_crs_rules_category_idx" ON "modsec_crs_rules"("category"); + +-- CreateIndex +CREATE UNIQUE INDEX "modsec_crs_rules_ruleFile_domainId_key" ON "modsec_crs_rules"("ruleFile", "domainId"); + +-- CreateIndex +CREATE INDEX "modsec_rules_domainId_idx" ON "modsec_rules"("domainId"); + +-- CreateIndex +CREATE INDEX "modsec_rules_category_idx" ON "modsec_rules"("category"); + +-- CreateIndex +CREATE INDEX "nginx_configs_configType_idx" ON "nginx_configs"("configType"); + +-- CreateIndex +CREATE UNIQUE INDEX 
"installation_status_component_key" ON "installation_status"("component"); + +-- CreateIndex +CREATE INDEX "alert_rule_channels_ruleId_idx" ON "alert_rule_channels"("ruleId"); + +-- CreateIndex +CREATE INDEX "alert_rule_channels_channelId_idx" ON "alert_rule_channels"("channelId"); + +-- CreateIndex +CREATE UNIQUE INDEX "alert_rule_channels_ruleId_channelId_key" ON "alert_rule_channels"("ruleId", "channelId"); + +-- CreateIndex +CREATE INDEX "alert_history_severity_idx" ON "alert_history"("severity"); + +-- CreateIndex +CREATE INDEX "alert_history_acknowledged_idx" ON "alert_history"("acknowledged"); + +-- CreateIndex +CREATE INDEX "alert_history_timestamp_idx" ON "alert_history"("timestamp"); + +-- CreateIndex +CREATE UNIQUE INDEX "access_lists_name_key" ON "access_lists"("name"); + +-- CreateIndex +CREATE INDEX "access_lists_type_idx" ON "access_lists"("type"); + +-- CreateIndex +CREATE INDEX "access_lists_enabled_idx" ON "access_lists"("enabled"); + +-- CreateIndex +CREATE INDEX "access_list_auth_users_accessListId_idx" ON "access_list_auth_users"("accessListId"); + +-- CreateIndex +CREATE UNIQUE INDEX "access_list_auth_users_accessListId_username_key" ON "access_list_auth_users"("accessListId", "username"); + +-- CreateIndex +CREATE INDEX "access_list_domains_accessListId_idx" ON "access_list_domains"("accessListId"); + +-- CreateIndex +CREATE INDEX "access_list_domains_domainId_idx" ON "access_list_domains"("domainId"); + +-- CreateIndex +CREATE UNIQUE INDEX "access_list_domains_accessListId_domainId_key" ON "access_list_domains"("accessListId", "domainId"); + +-- CreateIndex +CREATE INDEX "performance_metrics_domain_timestamp_idx" ON "performance_metrics"("domain", "timestamp"); + +-- CreateIndex +CREATE INDEX "performance_metrics_timestamp_idx" ON "performance_metrics"("timestamp"); + +-- CreateIndex +CREATE INDEX "backup_files_scheduleId_idx" ON "backup_files"("scheduleId"); + +-- CreateIndex +CREATE INDEX "backup_files_createdAt_idx" ON "backup_files"("createdAt"); + +-- CreateIndex +CREATE UNIQUE INDEX "slave_nodes_name_key" ON "slave_nodes"("name"); + +-- CreateIndex +CREATE UNIQUE INDEX "slave_nodes_apiKey_key" ON "slave_nodes"("apiKey"); + +-- CreateIndex +CREATE INDEX "slave_nodes_status_idx" ON "slave_nodes"("status"); + +-- CreateIndex +CREATE INDEX "slave_nodes_lastSeen_idx" ON "slave_nodes"("lastSeen"); + +-- CreateIndex +CREATE INDEX "sync_logs_nodeId_startedAt_idx" ON "sync_logs"("nodeId", "startedAt"); + +-- CreateIndex +CREATE UNIQUE INDEX "config_versions_configHash_key" ON "config_versions"("configHash"); + +-- CreateIndex +CREATE INDEX "config_versions_createdAt_idx" ON "config_versions"("createdAt"); + +-- CreateIndex +CREATE UNIQUE INDEX "network_load_balancers_name_key" ON "network_load_balancers"("name"); + +-- CreateIndex +CREATE INDEX "network_load_balancers_status_idx" ON "network_load_balancers"("status"); + +-- CreateIndex +CREATE INDEX "network_load_balancers_port_idx" ON "network_load_balancers"("port"); + +-- CreateIndex +CREATE INDEX "nlb_upstreams_nlbId_idx" ON "nlb_upstreams"("nlbId"); + +-- CreateIndex +CREATE INDEX "nlb_upstreams_status_idx" ON "nlb_upstreams"("status"); + +-- CreateIndex +CREATE INDEX "nlb_health_checks_nlbId_checkedAt_idx" ON "nlb_health_checks"("nlbId", "checkedAt"); + +-- CreateIndex +CREATE INDEX "nlb_health_checks_upstreamHost_upstreamPort_idx" ON "nlb_health_checks"("upstreamHost", "upstreamPort"); diff --git a/apps/api/prisma/migrations/migration_lock.toml b/apps/api/prisma/migrations/migration_lock.toml index 
fbffa92..e5e5c47 100644 --- a/apps/api/prisma/migrations/migration_lock.toml +++ b/apps/api/prisma/migrations/migration_lock.toml @@ -1,3 +1,3 @@ # Please do not edit this file manually # It should be added in your version-control system (i.e. Git) -provider = "postgresql" \ No newline at end of file +provider = "sqlite" \ No newline at end of file diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index 5574ac5..adcf946 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -1,35 +1,53 @@ -// This is your Prisma schema file, -// learn more about it in the docs: https://pris.ly/d/prisma-schema +// This is your Prisma schema file (SQLite version) +// Converted from PostgreSQL to SQLite +// +// IMPORTANT NOTES: +// - All enum types have been converted to String (validate in application code) +// - All Json types have been converted to String (serialize/deserialize in application code) +// - All String[] arrays have been converted to String (use JSON format: "[]" or JSON.stringify) +// - autoincrement() has been replaced with default(1) for version tracking +// +// ENUM VALUE REFERENCE (for application validation): +// - AccessListType: ip_whitelist, http_basic_auth, combined +// - AclAction: allow, deny, challenge +// - AclField: ip, geoip, user_agent, url, method, header +// - AclOperator: equals, contains, regex +// - AclType: whitelist, blacklist +// - ActivityType: login, logout, config_change, user_action, security, system +// - AlertSeverity: critical, warning, info +// - BackupStatus: success, failed, running, pending +// - DomainStatus: active, inactive, error +// - LoadBalancerAlgorithm: round_robin, least_conn, ip_hash +// - NLBAlgorithm: round_robin, least_conn, ip_hash, hash +// - NLBProtocol: tcp, udp, tcp_udp +// - NLBStatus: active, inactive, error +// - NLBUpstreamStatus: up, down, checking +// - NodeMode: master, slave +// - NotificationChannelType: email, telegram +// - SSLStatus: valid, expiring, expired +// - SlaveNodeStatus: online, offline, syncing, error +// - SyncLogStatus: success, failed, partial, running +// - SyncLogType: full_sync, incremental_sync, health_check +// - UpstreamStatus: up, down, checking +// - UserRole: admin, moderator, viewer +// - UserStatus: active, inactive, suspended generator client { provider = "prisma-client-js" } datasource db { - provider = "postgresql" + provider = "sqlite" url = env("DATABASE_URL") + // SQLite DATABASE_URL format: "file:./nginx_waf.db" (relative to prisma directory) + // For production: "file:/absolute/path/to/nginx_waf.db" } -enum UserRole { - admin - moderator - viewer -} -enum UserStatus { - active - inactive - suspended -} -enum ActivityType { - login - logout - config_change - user_action - security - system -} + + + model User { id String @id @default(cuid()) @@ -37,8 +55,8 @@ model User { email String @unique password String fullName String - role UserRole @default(viewer) - status UserStatus @default(active) + role String @default("viewer") + status String @default("active") avatar String? phone String? timezone String @default("Asia/Ho_Chi_Minh") @@ -82,7 +100,7 @@ model TwoFactorAuth { enabled Boolean @default(false) method String @default("totp") // totp, sms secret String? - backupCodes String[] // Encrypted backup codes + backupCodes String? @default("[]") // JSON array of encrypted backup codes createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -96,10 +114,10 @@ model ActivityLog { user User? 
@relation(fields: [userId], references: [id], onDelete: Cascade) action String - type ActivityType + type String ip String - userAgent String @db.Text - details String? @db.Text + userAgent String + details String? success Boolean @default(true) timestamp DateTime @default(now()) @@ -131,7 +149,7 @@ model UserSession { sessionId String @unique ip String - userAgent String @db.Text + userAgent String device String? location String? @@ -146,34 +164,18 @@ model UserSession { // Domain Management Models -enum DomainStatus { - active - inactive - error -} -enum UpstreamStatus { - up - down - checking -} -enum LoadBalancerAlgorithm { - round_robin - least_conn - ip_hash -} -enum SSLStatus { - valid - expiring - expired -} + + + + model Domain { id String @id @default(cuid()) name String @unique - status DomainStatus @default(inactive) + status String @default("inactive") sslEnabled Boolean @default(false) sslExpiry DateTime? modsecEnabled Boolean @default(true) @@ -181,14 +183,14 @@ model Domain { // Real IP Configuration (for Cloudflare and other proxies) realIpEnabled Boolean @default(false) realIpCloudflare Boolean @default(false) // Use Cloudflare IP ranges - realIpCustomCidrs String[] @default([]) // Custom CIDR ranges for set_real_ip_from + realIpCustomCidrs String @default("") // Custom CIDR ranges for set_real_ip_from // Advanced Configuration hstsEnabled Boolean @default(false) // HTTP Strict Transport Security http2Enabled Boolean @default(true) // Enable HTTP/2 grpcEnabled Boolean @default(false) // Enable gRPC/gRPCs support clientMaxBodySize Int? @default(100) // Maximum request body size in MB (client_max_body_size) - customLocations Json? // Custom location blocks configuration + customLocations String? // Custom location blocks configuration // Relations upstreams Upstream[] @@ -218,7 +220,7 @@ model Upstream { weight Int @default(1) maxFails Int @default(3) failTimeout Int @default(10) // seconds - status UpstreamStatus @default(checking) + status String @default("checking") createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -232,7 +234,7 @@ model LoadBalancerConfig { domainId String @unique domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) - algorithm LoadBalancerAlgorithm @default(round_robin) + algorithm String @default("round_robin") healthCheckEnabled Boolean @default(true) healthCheckInterval Int @default(30) // seconds healthCheckTimeout Int @default(5) // seconds @@ -250,22 +252,22 @@ model SSLCertificate { domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) commonName String - sans String[] // Subject Alternative Names + sans String // Subject Alternative Names issuer String subject String? // Full subject string (e.g., "CN=example.com, O=Example, C=US") - certificate String @db.Text // PEM format - privateKey String @db.Text // PEM format - chain String? @db.Text // PEM format + certificate String // PEM format + privateKey String // PEM format + chain String? // PEM format // Additional certificate details stored as JSON - subjectDetails Json? // { commonName, organization, country } - issuerDetails Json? // { commonName, organization, country } + subjectDetails String? // { commonName, organization, country } + issuerDetails String? // { commonName, organization, country } serialNumber String? 
// Certificate serial number validFrom DateTime validTo DateTime autoRenew Boolean @default(true) - status SSLStatus @default(valid) + status String @default("valid") createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -286,7 +288,7 @@ model ModSecCRSRule { ruleFile String // e.g., "REQUEST-942-APPLICATION-ATTACK-SQLI.conf" name String category String - description String? @db.Text + description String? enabled Boolean @default(true) paranoia Int @default(1) // Paranoia level 1-4 @@ -308,9 +310,9 @@ model ModSecRule { name String category String - ruleContent String @db.Text + ruleContent String enabled Boolean @default(true) - description String? @db.Text + description String? createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -324,7 +326,7 @@ model NginxConfig { id String @id @default(cuid()) configType String // main, site, upstream, etc. name String - content String @db.Text + content String enabled Boolean @default(true) createdAt DateTime @default(now()) @@ -339,7 +341,7 @@ model InstallationStatus { component String @unique // nginx, modsecurity, etc. status String // pending, running, completed, failed step String? - message String? @db.Text + message String? progress Int @default(0) // 0-100 startedAt DateTime @default(now()) @@ -349,23 +351,16 @@ model InstallationStatus { @@map("installation_status") } -enum NotificationChannelType { - email - telegram -} -enum AlertSeverity { - critical - warning - info -} + + model NotificationChannel { id String @id @default(cuid()) name String - type NotificationChannelType + type String enabled Boolean @default(true) - config Json // { email?, chatId?, botToken? } + config String // { email?, chatId?, botToken? } alertRules AlertRuleChannel[] @@ -380,7 +375,7 @@ model AlertRule { name String condition String // cpu > threshold, upstream_status == down, etc. threshold Int - severity AlertSeverity + severity String enabled Boolean @default(true) checkInterval Int @default(60) // Check interval in seconds (default: 60s) @@ -410,8 +405,8 @@ model AlertRuleChannel { model AlertHistory { id String @id @default(cuid()) - severity AlertSeverity - message String @db.Text + severity String + message String source String acknowledged Boolean @default(false) acknowledgedBy String? @@ -426,40 +421,22 @@ model AlertHistory { @@map("alert_history") } -enum AclType { - whitelist - blacklist -} -enum AclField { - ip - geoip - user_agent - url - method - header -} -enum AclOperator { - equals - contains - regex -} -enum AclAction { - allow - deny - challenge -} + + + + model AclRule { id String @id @default(cuid()) name String - type AclType - conditionField AclField - conditionOperator AclOperator + type String + conditionField String + conditionOperator String conditionValue String - action AclAction + action String enabled Boolean @default(true) createdAt DateTime @default(now()) @@ -470,21 +447,17 @@ model AclRule { // Access Lists Management Models -enum AccessListType { - ip_whitelist - http_basic_auth - combined // Both IP and Basic Auth -} + model AccessList { id String @id @default(cuid()) name String @unique - description String? @db.Text - type AccessListType + description String? 
+ type String enabled Boolean @default(true) // IP Whitelist configuration - allowedIps String[] @default([]) // List of allowed IP addresses/CIDR + allowedIps String @default("") // List of allowed IP addresses/CIDR // HTTP Basic Auth configuration authUsers AccessListAuthUser[] @@ -551,12 +524,7 @@ model PerformanceMetric { @@map("performance_metrics") } -enum BackupStatus { - success - failed - running - pending -} + model BackupSchedule { id String @id @default(cuid()) @@ -565,7 +533,7 @@ model BackupSchedule { enabled Boolean @default(true) lastRun DateTime? nextRun DateTime? - status BackupStatus @default(pending) + status String @default("pending") backups BackupFile[] @@ -582,11 +550,11 @@ model BackupFile { filename String filepath String - size BigInt // Size in bytes - status BackupStatus @default(success) + size Int // Size in bytes + status String @default("success") type String @default("full") // full, incremental, manual - metadata Json? // Additional metadata (domains count, rules count, etc.) + metadata String? // Additional metadata (domains count, rules count, etc.) createdAt DateTime @default(now()) @@ -595,30 +563,13 @@ model BackupFile { @@map("backup_files") } -enum SlaveNodeStatus { - online - offline - syncing - error -} -enum SyncLogStatus { - success - failed - partial - running -} -enum SyncLogType { - full_sync - incremental_sync - health_check -} -enum NodeMode { - master - slave -} + + + + model SlaveNode { id String @id @default(cuid()) @@ -627,7 +578,7 @@ model SlaveNode { port Int @default(3001) apiKey String @unique // Authentication token for slave - status SlaveNodeStatus @default(offline) + status String @default("offline") lastSeen DateTime? version String? @@ -655,7 +606,7 @@ model SlaveNode { model SystemConfig { id String @id @default(cuid()) - nodeMode NodeMode @default(master) // master or slave + nodeMode String @default("master") // master or slave // Master mode settings masterApiEnabled Boolean @default(true) @@ -684,12 +635,12 @@ model SyncLog { nodeId String node SlaveNode @relation(fields: [nodeId], references: [id], onDelete: Cascade) - type SyncLogType - status SyncLogStatus @default(running) + type String + status String @default("running") configHash String? changesCount Int? - errorMessage String? @db.Text + errorMessage String? startedAt DateTime @default(now()) completedAt DateTime? @@ -701,9 +652,9 @@ model SyncLog { model ConfigVersion { id String @id @default(cuid()) - version Int @default(autoincrement()) + version Int @default(1) // Note: SQLite only supports autoincrement() on INTEGER PRIMARY KEY configHash String @unique - configData Json // Serialized config + configData String // Serialized config createdBy String? description String? @@ -716,39 +667,22 @@ model ConfigVersion { // Network Load Balancer Models -enum NLBStatus { - active - inactive - error -} -enum NLBProtocol { - tcp - udp - tcp_udp // Both TCP and UDP -} -enum NLBAlgorithm { - round_robin - least_conn - ip_hash - hash -} -enum NLBUpstreamStatus { - up - down - checking -} + + + + model NetworkLoadBalancer { id String @id @default(cuid()) name String @unique - description String? @db.Text + description String? 
port Int // Listen port (must be >= 10000) - protocol NLBProtocol @default(tcp) - algorithm NLBAlgorithm @default(round_robin) - status NLBStatus @default(inactive) + protocol String @default("tcp") + algorithm String @default("round_robin") + status String @default("inactive") enabled Boolean @default(true) // Advanced settings @@ -790,11 +724,11 @@ model NLBUpstream { maxConns Int @default(0) // 0 = unlimited backup Boolean @default(false) down Boolean @default(false) // Manually mark as down - status NLBUpstreamStatus @default(checking) + status String @default("checking") // Metadata lastCheck DateTime? - lastError String? @db.Text + lastError String? responseTime Float? // milliseconds createdAt DateTime @default(now()) @@ -812,9 +746,9 @@ model NLBHealthCheck { upstreamHost String upstreamPort Int - status NLBUpstreamStatus + status String responseTime Float? // milliseconds - error String? @db.Text + error String? checkedAt DateTime @default(now()) diff --git a/apps/api/src/domains/access-lists/access-lists.repository.ts b/apps/api/src/domains/access-lists/access-lists.repository.ts index cce047e..d6f5529 100644 --- a/apps/api/src/domains/access-lists/access-lists.repository.ts +++ b/apps/api/src/domains/access-lists/access-lists.repository.ts @@ -7,6 +7,35 @@ import { } from './access-lists.types'; import { PaginationMeta } from '../../shared/types/common.types'; +/** + * Helper functions for SQLite array serialization + */ +const serializeArray = (arr: string[] | undefined): string => { + if (!arr || arr.length === 0) return '[]'; + return JSON.stringify(arr); +}; + +const deserializeArray = (str: string | null | undefined): string[] => { + if (!str || str === '') return []; + try { + const parsed = JSON.parse(str); + return Array.isArray(parsed) ? parsed : []; + } catch { + return []; + } +}; + +/** + * Transform database record to include deserialized arrays + */ +const transformAccessList = (accessList: any): any => { + if (!accessList) return accessList; + return { + ...accessList, + allowedIps: deserializeArray(accessList.allowedIps), + }; +}; + /** * Repository for Access Lists data access */ @@ -71,7 +100,7 @@ export class AccessListsRepository { }); return { - accessLists, + accessLists: accessLists.map(transformAccessList), pagination: { page, limit, @@ -87,7 +116,7 @@ export class AccessListsRepository { * Find access list by ID */ async findById(id: string): Promise { - return prisma.accessList.findUnique({ + const accessList = await prisma.accessList.findUnique({ where: { id }, include: { authUsers: true, @@ -104,13 +133,14 @@ export class AccessListsRepository { }, }, }); + return transformAccessList(accessList); } /** * Find access list by name */ async findByName(name: string): Promise { - return prisma.accessList.findUnique({ + const accessList = await prisma.accessList.findUnique({ where: { name }, include: { authUsers: true, @@ -127,17 +157,19 @@ export class AccessListsRepository { }, }, }); + return transformAccessList(accessList); } /** * Create new access list */ async create(data: CreateAccessListInput): Promise { - const { authUsers, domainIds, ...accessListData } = data; + const { authUsers, domainIds, allowedIps, ...accessListData } = data; - return prisma.accessList.create({ + const accessList = await prisma.accessList.create({ data: { ...accessListData, + allowedIps: serializeArray(allowedIps), authUsers: authUsers ? 
{ create: authUsers.map((user) => ({ @@ -171,6 +203,7 @@ export class AccessListsRepository { }, }, }); + return transformAccessList(accessList); } /** @@ -180,10 +213,10 @@ export class AccessListsRepository { id: string, data: UpdateAccessListInput ): Promise { - const { authUsers, domainIds, ...accessListData } = data; + const { authUsers, domainIds, allowedIps, ...accessListData } = data; // Start a transaction to handle updates - return prisma.$transaction(async (tx) => { + const accessList = await prisma.$transaction(async (tx) => { // Get existing auth users to preserve passwords if not changed let existingAuthUsers: { username: string; passwordHash: string }[] = []; if (authUsers !== undefined) { @@ -207,11 +240,17 @@ export class AccessListsRepository { }); } + // Prepare update data + const updateData: any = { ...accessListData }; + if (allowedIps !== undefined) { + updateData.allowedIps = serializeArray(allowedIps); + } + // Update access list return tx.accessList.update({ where: { id }, data: { - ...accessListData, + ...updateData, authUsers: authUsers ? { create: authUsers.map((user) => { @@ -257,6 +296,7 @@ export class AccessListsRepository { }, }); }); + return transformAccessList(accessList); } /** @@ -272,7 +312,7 @@ export class AccessListsRepository { * Toggle access list enabled status */ async toggleEnabled(id: string, enabled: boolean): Promise { - return prisma.accessList.update({ + const accessList = await prisma.accessList.update({ where: { id }, data: { enabled }, include: { @@ -290,6 +330,7 @@ export class AccessListsRepository { }, }, }); + return transformAccessList(accessList); } /** @@ -358,7 +399,7 @@ export class AccessListsRepository { }, }); - return accessListDomains.map((ald) => ald.accessList); + return accessListDomains.map((ald) => transformAccessList(ald.accessList)); } /** diff --git a/apps/api/src/domains/access-lists/access-lists.types.ts b/apps/api/src/domains/access-lists/access-lists.types.ts index 0a2a5d7..5db35a9 100644 --- a/apps/api/src/domains/access-lists/access-lists.types.ts +++ b/apps/api/src/domains/access-lists/access-lists.types.ts @@ -11,9 +11,10 @@ export enum AccessListType { } /** - * Access List with relations + * Access List with relations and deserialized arrays */ -export type AccessListWithRelations = AccessList & { +export type AccessListWithRelations = Omit & { + allowedIps: string[]; // Deserialized from JSON string in database authUsers?: AccessListAuthUser[]; domains?: (AccessListDomain & { domain: { diff --git a/apps/api/src/domains/account/account.repository.ts b/apps/api/src/domains/account/account.repository.ts index 34732dd..9d3d7a0 100644 --- a/apps/api/src/domains/account/account.repository.ts +++ b/apps/api/src/domains/account/account.repository.ts @@ -1,5 +1,5 @@ import prisma from '../../config/database'; -import { ActivityType } from '@prisma/client'; +import { ActivityType } from '../../shared/types/enums'; import { UserWithTwoFactor, RequestMetadata, SessionData } from './account.types'; /** @@ -99,12 +99,12 @@ export class AccountRepository { userId, enabled: data.enabled, secret: data.secret, - backupCodes: data.backupCodes, + backupCodes: data.backupCodes ? 
JSON.stringify(data.backupCodes) : '[]', }, update: { enabled: data.enabled, ...(data.secret && { secret: data.secret }), - ...(data.backupCodes && { backupCodes: data.backupCodes }), + ...(data.backupCodes && { backupCodes: JSON.stringify(data.backupCodes) }), }, }); } @@ -125,7 +125,7 @@ export class AccountRepository { async createActivityLog( userId: string, action: string, - type: ActivityType, + type: ActivityType | string, // Accept both enum and string literals for SQLite compatibility metadata: RequestMetadata, success: boolean, details?: string @@ -134,7 +134,7 @@ export class AccountRepository { data: { userId, action, - type, + type: type as string, // SQLite stores as string ip: metadata.ip, userAgent: metadata.userAgent, success, diff --git a/apps/api/src/domains/alerts/alerts.repository.ts b/apps/api/src/domains/alerts/alerts.repository.ts index 944f25d..c978ef7 100644 --- a/apps/api/src/domains/alerts/alerts.repository.ts +++ b/apps/api/src/domains/alerts/alerts.repository.ts @@ -12,6 +12,32 @@ import { } from './dto'; import { NotificationChannel, AlertRuleWithChannels } from './alerts.types'; +/** + * Helper functions for SQLite JSON serialization/deserialization + */ +const deserializeConfig = (config: string | null): any => { + if (!config) return null; + try { + return JSON.parse(config); + } catch { + return null; + } +}; + +const serializeConfig = (config: any): string | null => { + if (!config) return null; + if (typeof config === 'string') return config; + return JSON.stringify(config); +}; + +const transformNotificationChannel = (channel: any): any => { + if (!channel) return channel; + return { + ...channel, + config: deserializeConfig(channel.config), + }; +}; + /** * Notification Channel Repository */ @@ -20,47 +46,51 @@ export class NotificationChannelRepository { * Get all notification channels */ async findAll(): Promise { - return await prisma.notificationChannel.findMany({ + const channels = await prisma.notificationChannel.findMany({ orderBy: { createdAt: 'desc' } - }) as NotificationChannel[]; + }); + return channels.map(transformNotificationChannel) as NotificationChannel[]; } /** * Get single notification channel by ID */ async findById(id: string): Promise { - return await prisma.notificationChannel.findUnique({ + const channel = await prisma.notificationChannel.findUnique({ where: { id } - }) as NotificationChannel | null; + }); + return transformNotificationChannel(channel) as NotificationChannel | null; } /** * Get multiple channels by IDs */ async findByIds(ids: string[]): Promise { - return await prisma.notificationChannel.findMany({ + const channels = await prisma.notificationChannel.findMany({ where: { id: { in: ids } } - }) as NotificationChannel[]; + }); + return channels.map(transformNotificationChannel) as NotificationChannel[]; } /** * Create notification channel */ async create(data: CreateNotificationChannelDto): Promise { - return await prisma.notificationChannel.create({ + const channel = await prisma.notificationChannel.create({ data: { name: data.name, type: data.type as any, enabled: data.enabled !== undefined ? 
data.enabled : true, - config: data.config as any + config: serializeConfig(data.config) as any } - }) as NotificationChannel; + }); + return transformNotificationChannel(channel) as NotificationChannel; } /** @@ -71,12 +101,13 @@ export class NotificationChannelRepository { if (data.name) updateData.name = data.name; if (data.type) updateData.type = data.type; if (data.enabled !== undefined) updateData.enabled = data.enabled; - if (data.config) updateData.config = data.config; + if (data.config) updateData.config = serializeConfig(data.config); - return await prisma.notificationChannel.update({ + const channel = await prisma.notificationChannel.update({ where: { id }, data: updateData - }) as NotificationChannel; + }); + return transformNotificationChannel(channel) as NotificationChannel; } /** diff --git a/apps/api/src/domains/auth/auth.repository.ts b/apps/api/src/domains/auth/auth.repository.ts index 8fcf43d..918b54d 100644 --- a/apps/api/src/domains/auth/auth.repository.ts +++ b/apps/api/src/domains/auth/auth.repository.ts @@ -1,6 +1,6 @@ import prisma from '../../config/database'; import { UserWithTwoFactor, RefreshTokenWithUser, RequestMetadata } from './auth.types'; -import { ActivityType } from '@prisma/client'; +import { ActivityType } from '../../shared/types/enums'; /** * Auth repository - Handles all Prisma database operations for authentication @@ -36,7 +36,7 @@ export class AuthRepository { async createActivityLog( userId: string | null, action: string, - type: ActivityType, + type: ActivityType | string, // Accept both enum and string literals for SQLite compatibility metadata: RequestMetadata, success: boolean, details?: string @@ -45,7 +45,7 @@ export class AuthRepository { data: { userId, action, - type, + type: type as string, // SQLite stores as string ip: metadata.ip, userAgent: metadata.userAgent, success, diff --git a/apps/api/src/domains/backup/backup.service.ts b/apps/api/src/domains/backup/backup.service.ts index a2a4655..15ab9d1 100644 --- a/apps/api/src/domains/backup/backup.service.ts +++ b/apps/api/src/domains/backup/backup.service.ts @@ -235,15 +235,15 @@ export class BackupService { schedule: { connect: { id } }, filename, filepath, - size: BigInt(stats.size), + size: Number(stats.size), // Convert to Number for SQLite (Int type) status: 'success', type: 'manual', - metadata: { + metadata: JSON.stringify({ domainsCount: backupData.domains.length, sslCount: backupData.ssl.length, modsecRulesCount: backupData.modsec.customRules.length, aclRulesCount: backupData.acl.length, - }, + }), }); // Update schedule status @@ -462,7 +462,7 @@ export class BackupService { return { domainName: s.domain?.name || '', commonName: s.commonName, - sans: s.sans, + sans: JSON.parse(s.sans || '[]'), // Deserialize from JSON string issuer: s.issuer, autoRenew: s.autoRenew, validFrom: s.validFrom, @@ -475,7 +475,7 @@ export class BackupService { return { domainName: s.domain.name, commonName: s.commonName, - sans: s.sans, + sans: JSON.parse(s.sans || '[]'), // Deserialize from JSON string issuer: s.issuer, autoRenew: s.autoRenew, validFrom: s.validFrom, diff --git a/apps/api/src/domains/cluster/cluster.repository.ts b/apps/api/src/domains/cluster/cluster.repository.ts index 567fa1a..e8e10af 100644 --- a/apps/api/src/domains/cluster/cluster.repository.ts +++ b/apps/api/src/domains/cluster/cluster.repository.ts @@ -11,7 +11,7 @@ export class ClusterRepository { async findByName(name: string): Promise { return prisma.slaveNode.findUnique({ where: { name } - }); + }) as Promise; } /** 
@@ -34,7 +34,7 @@ export class ClusterRepository { updatedAt: true // DO NOT return apiKey } - }); + }) as Promise; } /** @@ -70,7 +70,7 @@ export class ClusterRepository { ...data, status: data.status as any } - }); + }) as Promise; } /** @@ -95,7 +95,7 @@ export class ClusterRepository { updatedAt: true // DO NOT return apiKey } - }); + }) as Promise; } /** @@ -232,7 +232,7 @@ export class ClusterRepository { sslCertificates: ssl.map(s => ({ domainName: s.domain?.name, commonName: s.commonName, - sans: s.sans, + sans: JSON.parse(s.sans || '[]'), // Deserialize from JSON string issuer: s.issuer, certificate: s.certificate, privateKey: s.privateKey, @@ -409,7 +409,7 @@ export class ClusterRepository { where: { domainId: domain.id }, update: { commonName: sslData.commonName, - sans: sslData.sans || [], + sans: JSON.stringify(sslData.sans || []), // Serialize array for SQLite issuer: sslData.issuer, certificate: sslData.certificate, privateKey: sslData.privateKey, @@ -421,7 +421,7 @@ export class ClusterRepository { create: { domainId: domain.id, commonName: sslData.commonName, - sans: sslData.sans || [], + sans: JSON.stringify(sslData.sans || []), // Serialize array for SQLite issuer: sslData.issuer, certificate: sslData.certificate, privateKey: sslData.privateKey, diff --git a/apps/api/src/domains/domains/domains.repository.ts b/apps/api/src/domains/domains/domains.repository.ts index 589bff8..5f5f9af 100644 --- a/apps/api/src/domains/domains/domains.repository.ts +++ b/apps/api/src/domains/domains/domains.repository.ts @@ -9,6 +9,40 @@ import { import { PaginationMeta } from '../../shared/types/common.types'; import { DEFAULT_CLIENT_MAX_BODY_SIZE } from '../../shared/constants/domain.constants'; +/** + * Helper functions for SQLite JSON/array deserialization + */ +const deserializeCustomLocations = (str: string | null): any => { + if (!str) return null; + try { + return JSON.parse(str); + } catch { + return null; + } +}; + +const deserializeRealIpCustomCidrs = (str: string | null): string[] => { + if (!str) return []; + try { + const parsed = JSON.parse(str); + return Array.isArray(parsed) ? parsed : []; + } catch { + return []; + } +}; + +/** + * Transform database domain to include deserialized JSON fields + */ +const transformDomain = (domain: any): any => { + if (!domain) return domain; + return { + ...domain, + customLocations: deserializeCustomLocations(domain.customLocations), + realIpCustomCidrs: deserializeRealIpCustomCidrs(domain.realIpCustomCidrs), + }; +}; + /** * Repository for domain database operations */ @@ -82,7 +116,7 @@ export class DomainsRepository { const totalPages = Math.ceil(totalCount / limitNum); return { - domains: domains as DomainWithRelations[], + domains: domains.map(transformDomain) as DomainWithRelations[], pagination: { page: pageNum, limit: limitNum, @@ -135,7 +169,7 @@ export class DomainsRepository { }, }); - return domain as DomainWithRelations | null; + return transformDomain(domain) as DomainWithRelations | null; } /** @@ -149,13 +183,13 @@ export class DomainsRepository { modsecEnabled: input.modsecEnabled !== undefined ? 
input.modsecEnabled : true, realIpEnabled: input.realIpConfig?.realIpEnabled || false, realIpCloudflare: input.realIpConfig?.realIpCloudflare || false, - realIpCustomCidrs: input.realIpConfig?.realIpCustomCidrs || [], + realIpCustomCidrs: JSON.stringify(input.realIpConfig?.realIpCustomCidrs || []), // Advanced configuration hstsEnabled: input.advancedConfig?.hstsEnabled || false, http2Enabled: input.advancedConfig?.http2Enabled !== undefined ? input.advancedConfig.http2Enabled : true, grpcEnabled: input.advancedConfig?.grpcEnabled || false, clientMaxBodySize: input.advancedConfig?.clientMaxBodySize !== undefined ? input.advancedConfig.clientMaxBodySize : DEFAULT_CLIENT_MAX_BODY_SIZE, - customLocations: input.advancedConfig?.customLocations ? JSON.parse(JSON.stringify(input.advancedConfig.customLocations)) : null, + customLocations: input.advancedConfig?.customLocations ? JSON.stringify(input.advancedConfig.customLocations) : null, upstreams: { create: input.upstreams.map((u: CreateUpstreamData) => ({ host: u.host, @@ -188,7 +222,7 @@ export class DomainsRepository { }, }); - return domain as DomainWithRelations; + return transformDomain(domain) as DomainWithRelations; } /** @@ -205,7 +239,7 @@ export class DomainsRepository { }, }); - return domain as DomainWithRelations; + return transformDomain(domain) as DomainWithRelations; } /** @@ -249,7 +283,7 @@ export class DomainsRepository { : currentDomain.realIpCloudflare, realIpCustomCidrs: input.realIpConfig?.realIpCustomCidrs !== undefined - ? input.realIpConfig.realIpCustomCidrs + ? JSON.stringify(input.realIpConfig.realIpCustomCidrs) : currentDomain.realIpCustomCidrs, // Advanced configuration hstsEnabled: @@ -270,7 +304,7 @@ export class DomainsRepository { : currentDomain.clientMaxBodySize, customLocations: input.advancedConfig?.customLocations !== undefined - ? JSON.parse(JSON.stringify(input.advancedConfig.customLocations)) + ? 
JSON.stringify(input.advancedConfig.customLocations) : currentDomain.customLocations, }, }); diff --git a/apps/api/src/domains/domains/services/nginx-config.service.ts b/apps/api/src/domains/domains/services/nginx-config.service.ts index 1628341..65aaf56 100644 --- a/apps/api/src/domains/domains/services/nginx-config.service.ts +++ b/apps/api/src/domains/domains/services/nginx-config.service.ts @@ -409,8 +409,9 @@ ${customLocations} } // Custom CIDR ranges - if (domain.realIpCustomCidrs && domain.realIpCustomCidrs.length > 0) { - domain.realIpCustomCidrs.forEach(cidr => { + const customCidrs = JSON.parse(domain.realIpCustomCidrs || '[]'); // Deserialize from JSON string + if (customCidrs && customCidrs.length > 0) { + customCidrs.forEach((cidr: string) => { lines.push(` set_real_ip_from ${cidr};`); }); } diff --git a/apps/api/src/domains/ssl/services/ssl-scheduler.service.ts b/apps/api/src/domains/ssl/services/ssl-scheduler.service.ts index 22b5c74..2d54000 100644 --- a/apps/api/src/domains/ssl/services/ssl-scheduler.service.ts +++ b/apps/api/src/domains/ssl/services/ssl-scheduler.service.ts @@ -88,11 +88,11 @@ class SSLSchedulerService { privateKey: certFiles.privateKey, chain: certFiles.chain, commonName: certInfo.commonName, - sans: certInfo.sans, + sans: JSON.stringify(certInfo.sans), // Serialize array for SQLite issuer: certInfo.issuer, subject: certInfo.subject, - subjectDetails: certInfo.subjectDetails, - issuerDetails: certInfo.issuerDetails, + subjectDetails: JSON.stringify(certInfo.subjectDetails), // Serialize object for SQLite + issuerDetails: JSON.stringify(certInfo.issuerDetails), // Serialize object for SQLite serialNumber: certInfo.serialNumber, validFrom: certInfo.validFrom, validTo: certInfo.validTo, diff --git a/apps/api/src/domains/ssl/ssl.repository.ts b/apps/api/src/domains/ssl/ssl.repository.ts index 2b91d81..d131cbb 100644 --- a/apps/api/src/domains/ssl/ssl.repository.ts +++ b/apps/api/src/domains/ssl/ssl.repository.ts @@ -2,6 +2,66 @@ import prisma from '../../config/database'; import { SSLCertificate, Prisma } from '@prisma/client'; import { SSLCertificateWithDomain } from './ssl.types'; +/** + * Helper functions for SQLite JSON/array serialization and deserialization + */ +const deserializeSans = (sans: string | null): string[] => { + if (!sans) return []; + try { + const parsed = JSON.parse(sans); + return Array.isArray(parsed) ? parsed : []; + } catch { + return []; + } +}; + +const deserializeJsonField = (field: string | null): any => { + if (!field) return null; + try { + return JSON.parse(field); + } catch { + return null; + } +}; + +/** + * Transform SSL certificate from database (deserialize JSON fields) + */ +const transformSSLCertificate = (cert: any): any => { + if (!cert) return cert; + return { + ...cert, + sans: deserializeSans(cert.sans), + subjectDetails: deserializeJsonField(cert.subjectDetails), + issuerDetails: deserializeJsonField(cert.issuerDetails), + }; +}; + +/** + * Serialize SSL certificate data for database (convert to JSON strings) + */ +export const serializeSSLData = (data: any): any => { + const serialized: any = { ...data }; + + if (data.sans !== undefined) { + serialized.sans = Array.isArray(data.sans) ? JSON.stringify(data.sans) : data.sans; + } + + if (data.subjectDetails !== undefined && data.subjectDetails !== null) { + serialized.subjectDetails = typeof data.subjectDetails === 'object' + ? 
JSON.stringify(data.subjectDetails) + : data.subjectDetails; + } + + if (data.issuerDetails !== undefined && data.issuerDetails !== null) { + serialized.issuerDetails = typeof data.issuerDetails === 'object' + ? JSON.stringify(data.issuerDetails) + : data.issuerDetails; + } + + return serialized; +}; + /** * SSL Repository - Handles all database operations for SSL certificates */ @@ -10,7 +70,7 @@ export class SSLRepository { * Find all SSL certificates with domain information */ async findAll(): Promise { - return prisma.sSLCertificate.findMany({ + const certs = await prisma.sSLCertificate.findMany({ include: { domain: { select: { @@ -22,13 +82,14 @@ export class SSLRepository { }, orderBy: { validTo: 'asc' }, }); + return certs.map(transformSSLCertificate); } /** * Find SSL certificate by ID */ async findById(id: string): Promise { - return prisma.sSLCertificate.findUnique({ + const cert = await prisma.sSLCertificate.findUnique({ where: { id }, include: { domain: { @@ -40,15 +101,17 @@ export class SSLRepository { }, }, }); + return transformSSLCertificate(cert); } /** * Find SSL certificate by domain ID */ async findByDomainId(domainId: string): Promise { - return prisma.sSLCertificate.findUnique({ + const cert = await prisma.sSLCertificate.findUnique({ where: { domainId }, }); + return transformSSLCertificate(cert); } /** @@ -57,8 +120,8 @@ export class SSLRepository { async create( data: Prisma.SSLCertificateCreateInput ): Promise { - return prisma.sSLCertificate.create({ - data, + const cert = await prisma.sSLCertificate.create({ + data: serializeSSLData(data), include: { domain: { select: { @@ -69,6 +132,7 @@ export class SSLRepository { }, }, }); + return transformSSLCertificate(cert); } /** @@ -78,9 +142,9 @@ export class SSLRepository { id: string, data: Prisma.SSLCertificateUpdateInput ): Promise { - return prisma.sSLCertificate.update({ + const cert = await prisma.sSLCertificate.update({ where: { id }, - data, + data: serializeSSLData(data), include: { domain: { select: { @@ -91,6 +155,7 @@ export class SSLRepository { }, }, }); + return transformSSLCertificate(cert); } /** diff --git a/apps/api/src/domains/users/users.repository.ts b/apps/api/src/domains/users/users.repository.ts index 49a30b4..981d599 100644 --- a/apps/api/src/domains/users/users.repository.ts +++ b/apps/api/src/domains/users/users.repository.ts @@ -37,7 +37,7 @@ export class UsersRepository { orderBy: { createdAt: 'desc', }, - }); + }) as Promise; } /** @@ -47,7 +47,7 @@ export class UsersRepository { return prisma.user.findUnique({ where: { id }, select: USER_WITH_PROFILE_SELECT_FIELDS, - }); + }) as Promise; } /** @@ -57,7 +57,7 @@ export class UsersRepository { return prisma.user.findUnique({ where: { username }, select: USER_SELECT_FIELDS, - }); + }) as Promise; } /** @@ -67,7 +67,7 @@ export class UsersRepository { return prisma.user.findUnique({ where: { email }, select: USER_SELECT_FIELDS, - }); + }) as Promise; } /** @@ -79,7 +79,7 @@ export class UsersRepository { OR: [{ username }, { email }], }, select: USER_SELECT_FIELDS, - }); + }) as Promise; } /** @@ -99,7 +99,7 @@ export class UsersRepository { language: data.language || 'en', }, select: USER_SELECT_FIELDS, - }); + }) as Promise; } /** @@ -111,7 +111,7 @@ export class UsersRepository { if (data.username !== undefined) updateData.username = data.username; if (data.email !== undefined) updateData.email = data.email; if (data.fullName !== undefined) updateData.fullName = data.fullName; - if (data.role !== undefined) updateData.role = data.role 
as any; + if (data.role !== undefined) updateData.role = data.role; if (data.status !== undefined) updateData.status = data.status; if (data.phone !== undefined) updateData.phone = data.phone; if (data.timezone !== undefined) updateData.timezone = data.timezone; @@ -122,7 +122,7 @@ export class UsersRepository { where: { id }, data: updateData, select: USER_SELECT_FIELDS, - }); + }) as Promise; } /** @@ -133,7 +133,7 @@ export class UsersRepository { where: { id }, data: { status }, select: USER_SELECT_FIELDS, - }); + }) as Promise; } /** diff --git a/apps/api/src/shared/types/enums.ts b/apps/api/src/shared/types/enums.ts new file mode 100644 index 0000000..61fe154 --- /dev/null +++ b/apps/api/src/shared/types/enums.ts @@ -0,0 +1,158 @@ +/** + * Shared enum types for SQLite compatibility + * These replace the Prisma enums that are no longer available with SQLite + */ + +// User-related enums +export enum UserRole { + ADMIN = 'admin', + MODERATOR = 'moderator', + VIEWER = 'viewer' +} + +export enum UserStatus { + ACTIVE = 'active', + INACTIVE = 'inactive', + SUSPENDED = 'suspended' +} + +// Activity log enums +export enum ActivityType { + LOGIN = 'login', + LOGOUT = 'logout', + CONFIG_CHANGE = 'config_change', + USER_ACTION = 'user_action', + SECURITY = 'security', + SYSTEM = 'system' +} + +// Domain-related enums +export enum DomainStatus { + ACTIVE = 'active', + INACTIVE = 'inactive', + ERROR = 'error' +} + +export enum UpstreamStatus { + UP = 'up', + DOWN = 'down', + CHECKING = 'checking' +} + +export enum LoadBalancerAlgorithm { + ROUND_ROBIN = 'round_robin', + LEAST_CONN = 'least_conn', + IP_HASH = 'ip_hash' +} + +export enum SSLStatus { + VALID = 'valid', + EXPIRING = 'expiring', + EXPIRED = 'expired' +} + +// Notification and alert enums +export enum NotificationChannelType { + EMAIL = 'email', + TELEGRAM = 'telegram' +} + +export enum AlertSeverity { + CRITICAL = 'critical', + WARNING = 'warning', + INFO = 'info' +} + +// ACL-related enums +export enum AclType { + WHITELIST = 'whitelist', + BLACKLIST = 'blacklist' +} + +export enum AclField { + IP = 'ip', + GEOIP = 'geoip', + USER_AGENT = 'user_agent', + URL = 'url', + METHOD = 'method', + HEADER = 'header' +} + +export enum AclOperator { + EQUALS = 'equals', + CONTAINS = 'contains', + REGEX = 'regex' +} + +export enum AclAction { + ALLOW = 'allow', + DENY = 'deny', + CHALLENGE = 'challenge' +} + +// Access list enums +export enum AccessListType { + IP_WHITELIST = 'ip_whitelist', + HTTP_BASIC_AUTH = 'http_basic_auth', + COMBINED = 'combined' +} + +// Backup-related enums +export enum BackupStatus { + SUCCESS = 'success', + FAILED = 'failed', + RUNNING = 'running', + PENDING = 'pending' +} + +// Cluster-related enums +export enum SlaveNodeStatus { + ONLINE = 'online', + OFFLINE = 'offline', + SYNCING = 'syncing', + ERROR = 'error' +} + +export enum SyncLogStatus { + SUCCESS = 'success', + FAILED = 'failed', + PARTIAL = 'partial', + RUNNING = 'running' +} + +export enum SyncLogType { + FULL_SYNC = 'full_sync', + INCREMENTAL_SYNC = 'incremental_sync', + HEALTH_CHECK = 'health_check' +} + +export enum NodeMode { + MASTER = 'master', + SLAVE = 'slave' +} + +// Network Load Balancer enums +export enum NLBStatus { + ACTIVE = 'active', + INACTIVE = 'inactive', + ERROR = 'error' +} + +export enum NLBProtocol { + TCP = 'tcp', + UDP = 'udp', + TCP_UDP = 'tcp_udp' +} + +export enum NLBAlgorithm { + ROUND_ROBIN = 'round_robin', + LEAST_CONN = 'least_conn', + IP_HASH = 'ip_hash', + HASH = 'hash' +} + +export enum NLBUpstreamStatus { + UP = 'up', 
+ DOWN = 'down', + CHECKING = 'checking' +} diff --git a/docker-compose.db.yml b/docker-compose.db.yml index 109e72f..478e1cf 100644 --- a/docker-compose.db.yml +++ b/docker-compose.db.yml @@ -1,29 +1,15 @@ -services: - postgres: - image: postgres:15-alpine - container_name: nginx-love-postgres - restart: unless-stopped - environment: - POSTGRES_DB: ${DB_NAME:-nginx_love_db} - POSTGRES_USER: ${DB_USER:-nginx_love_user} - POSTGRES_PASSWORD: ${DB_PASSWORD:-change_this_password} - ports: - # Bind only to localhost for security (matches deploy.sh) - - "127.0.0.1:${DB_PORT:-5432}:5432" - volumes: - # Use consistent volume name with deploy.sh - - nginx-love-postgres-data:/var/lib/postgresql/data - networks: - - nginx-love-network - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-nginx_love_user}"] - interval: 10s - timeout: 5s - retries: 5 +# This file is no longer needed as we're using SQLite instead of PostgreSQL +# SQLite database will be stored as a file in apps/api/prisma/nginx_waf.db +# No Docker container is required for SQLite -volumes: - nginx-love-postgres-data: - driver: local +version: '3.8' + +# Note: This file is kept for backward compatibility but services are commented out +# To use SQLite, simply run the application - the database file will be created automatically + +# services: +# # PostgreSQL service removed - using SQLite instead +# # SQLite database file will be created at: apps/api/prisma/nginx_waf.db networks: nginx-love-network: diff --git a/docker-compose.yml b/docker-compose.yml index 29b296c..ffb4ab1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,32 +1,6 @@ version: '3.8' services: - # PostgreSQL Database - postgres: - image: postgres:15-alpine - container_name: nginx-love-postgres - hostname: postgres - restart: unless-stopped - environment: - POSTGRES_DB: ${DB_NAME:-nginx_waf} - POSTGRES_USER: ${DB_USER:-postgres} - POSTGRES_PASSWORD: ${DB_PASSWORD:-postgres} - POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C" - ports: - - "127.0.0.1:5432:5432" - volumes: - - postgres_data:/var/lib/postgresql/data - - ./docker/database.sql:/docker-entrypoint-initdb.d/01-database.sql:ro - - ./apps/api/prisma/migrations:/docker-entrypoint-initdb.d/migrations - networks: - - nginx-love-network - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-nginx_waf}"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 30s - # Backend API Service backend: image: vouu/nginx-waf-backend @@ -40,8 +14,8 @@ services: # - "443:443" # # add more port for NLB environment: - # Database Configuration - DATABASE_URL: "postgresql://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@localhost:5432/${DB_NAME:-nginx_waf}?schema=public" + # Database Configuration - SQLite (file-based, no separate service needed) + DATABASE_URL: "file:/app/data/nginx_waf.db" # Server Configuration PORT: ${API_PORT:-3001} @@ -75,15 +49,13 @@ services: SMTP_USER: ${SMTP_USER:-user@example.com} SMTP_PASS: ${SMTP_PASS:-change-this-to-random-password} volumes: + - sqlite_data:/app/data - backup:/var/backups - nginx_modules:/usr/lib/nginx - nginx_conf:/etc/nginx - nginx_logs:/var/log - acme_challenge:/var/www/html/.well-known/acme-challenge - ./config/nginx.conf:/etc/nginx/nginx.conf:ro - depends_on: - postgres: - condition: service_healthy healthcheck: test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3001/api/health"] interval: 30s @@ -118,7 +90,7 @@ services: start_period: 30s volumes: - postgres_data: + sqlite_data: 
    driver: local
  backup:
    driver: local
diff --git a/docs/MIGRATION_POSTGRES_TO_SQLITE.md b/docs/MIGRATION_POSTGRES_TO_SQLITE.md
new file mode 100644
index 0000000..4f02e1e
--- /dev/null
+++ b/docs/MIGRATION_POSTGRES_TO_SQLITE.md
@@ -0,0 +1,287 @@
+# PostgreSQL to SQLite Migration Guide
+
+This guide explains how to migrate your existing Nginx Love installation from PostgreSQL to SQLite without losing any data.
+
+## Prerequisites
+
+- Existing Nginx Love installation running with PostgreSQL
+- Backup of your PostgreSQL database (recommended)
+- Root or sudo access to the server
+- At least 1GB of free disk space for backups
+
+## Migration Process
+
+### Step 1: Back Up Your Current System
+
+Before starting the migration, create a manual backup:
+
+```bash
+# Back up PostgreSQL using pg_dump (optional but recommended)
+docker exec nginx-love-postgres pg_dump -U nginx_love_user nginx_love_db > /root/postgres-backup-$(date +%Y%m%d).sql
+
+# Or use the application's built-in backup feature
+# Navigate to Settings > Backup & Restore in the web interface
+```
+
+### Step 2: Run the Migration Script
+
+The migration script will automatically:
+1. Export all data from PostgreSQL to JSON
+2. Update the Prisma schema to use SQLite
+3. Create a new SQLite database
+4. Import all data from PostgreSQL into SQLite
+5. Back up your original configuration
+
+```bash
+cd /path/to/nginx-love
+sudo bash scripts/migrate-postgres-to-sqlite.sh
+```
+
+**What the script does:**
+- Exports all database tables to `backups/postgres-to-sqlite-YYYYMMDD_HHMMSS/postgres-export.json`
+- Backs up your current `.env` file
+- Updates `DATABASE_URL` to use SQLite
+- Creates a new SQLite database at `apps/api/prisma/nginx_waf.db`
+- Imports all data with proper type conversions
+- Generates a detailed migration log
+
+### Step 3: Restart the Application
+
+After migration completes successfully:
+
+```bash
+# Restart backend service
+sudo systemctl restart nginx-love-backend
+
+# Restart frontend service
+sudo systemctl restart nginx-love-frontend
+
+# Check service status
+sudo systemctl status nginx-love-backend
+sudo systemctl status nginx-love-frontend
+```
+
+### Step 4: Verify the Migration
+
+1. **Check the web interface:**
+   - Navigate to http://YOUR_SERVER_IP:8080
+   - Log in with your credentials
+   - Verify all data is present:
+     - Users and profiles
+     - Domains and upstreams
+     - SSL certificates
+     - ModSecurity rules
+     - Access lists
+     - Performance metrics
+     - Alert rules and history
+
+2. **Check the database file:**
+   ```bash
+   ls -lh /path/to/apps/api/prisma/nginx_waf.db
+
+   # Verify database integrity
+   sqlite3 /path/to/apps/api/prisma/nginx_waf.db "PRAGMA integrity_check;"
+   ```
+
+3. **Review migration logs:**
+   ```bash
+   cat /path/to/backups/postgres-to-sqlite-*/migration.log
+   ```
+
+### Step 5: Clean Up PostgreSQL (Optional)
+
+Once you've verified everything works correctly with SQLite, you can optionally remove PostgreSQL:
+
+```bash
+# Stop PostgreSQL container
+docker stop nginx-love-postgres
+
+# Remove PostgreSQL container
+docker rm nginx-love-postgres
+
+# Remove PostgreSQL volume (WARNING: This deletes the PostgreSQL database permanently)
+docker volume rm nginx-love-postgres-data
+```
+
+**Note:** Keep your backup directory for at least 30 days in case you need to roll back.
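+
+As an additional sanity check before removing PostgreSQL, you can compare row counts between the new SQLite database and the exported JSON. The snippet below is a minimal sketch, not part of the migration script: it assumes `sqlite3` and `jq` are installed, and that `postgres-export.json` stores each table as a top-level array keyed by table name (verify against your actual export file before relying on it):
+
+```bash
+# Locate the most recent migration backup directory
+BACKUP_DIR=$(ls -td backups/postgres-to-sqlite-* | head -1)
+DB=apps/api/prisma/nginx_waf.db
+
+# Compare row counts for a few key tables (extend the list as needed)
+for table in users domains ssl_certificates modsec_rules acl_rules access_lists; do
+  sqlite_count=$(sqlite3 "$DB" "SELECT COUNT(*) FROM $table;")
+  export_count=$(jq ".$table | length" "$BACKUP_DIR/postgres-export.json")
+  echo "$table: sqlite=$sqlite_count export=$export_count"
+done
+```
+
+If any counts diverge, review `migration.log` in the backup directory before deleting the PostgreSQL container or volume.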
+
+## Rollback Instructions
+
+If you encounter issues after migration, you can roll back to PostgreSQL:
+
+### Step 1: Stop the Application
+
+```bash
+sudo systemctl stop nginx-love-backend
+sudo systemctl stop nginx-love-frontend
+```
+
+### Step 2: Restore PostgreSQL Configuration
+
+```bash
+cd /path/to/nginx-love/apps/api
+
+# Find your backup directory (the backups/ directory lives at the project root)
+BACKUP_DIR=$(ls -td ../../backups/postgres-to-sqlite-* | head -1)
+
+# Restore .env file
+cp "$BACKUP_DIR/.env.backup" .env
+
+# Delete SQLite database
+rm -f prisma/nginx_waf.db prisma/nginx_waf.db-journal
+```
+
+### Step 3: Start PostgreSQL Container
+
+```bash
+# If you stopped but didn't remove the container
+docker start nginx-love-postgres
+
+# Wait for PostgreSQL to be ready
+sleep 10
+```
+
+### Step 4: Restart the Application
+
+```bash
+sudo systemctl start nginx-love-backend
+sudo systemctl start nginx-love-frontend
+```
+
+## Data Type Conversions
+
+The migration script automatically handles the following conversions:
+
+### Enums → Strings
+All enum types are converted to string values:
+```
+UserRole: "admin", "moderator", "viewer"
+UserStatus: "active", "inactive", "suspended"
+DomainStatus: "active", "inactive", "error"
+... (24 enums total)
+```
+
+### JSON Fields → Strings
+JSON data is serialized to strings:
+```
+SSLCertificate.subjectDetails: JSON object → JSON string
+NotificationChannel.config: JSON object → JSON string
+Domain.customLocations: JSON object → JSON string
+```
+
+### Arrays → Strings
+Array fields are converted to JSON strings:
+```
+TwoFactorAuth.backupCodes: ["code1", "code2"] → '["code1", "code2"]'
+SSLCertificate.sans: ["domain1", "domain2"] → '["domain1", "domain2"]'
+Domain.realIpCustomCidrs: ["10.0.0.0/8"] → '["10.0.0.0/8"]'
+```
+
+## Troubleshooting
+
+### Migration Script Fails
+
+**Error: "Failed to export data from PostgreSQL"**
+- **Cause:** PostgreSQL is not running or DATABASE_URL is incorrect
+- **Solution:**
+  ```bash
+  docker ps | grep postgres  # Check if container is running
+  docker start nginx-love-postgres  # Start if stopped
+  cat apps/api/.env | grep DATABASE_URL  # Verify connection string
+  ```
+
+**Error: "Failed to import data into SQLite"**
+- **Cause:** Data type conversion issue or disk space
+- **Solution:**
+  ```bash
+  df -h  # Check disk space
+  # Review import log for specific errors
+  cat backups/postgres-to-sqlite-*/migration.log
+  # The script automatically restores PostgreSQL config on import failure
+  ```
+
+### Post-Migration Issues
+
+**Application won't start after migration**
+- Check logs: `sudo journalctl -u nginx-love-backend -f`
+- Verify DATABASE_URL: `cat apps/api/.env | grep DATABASE_URL`
+- Should be: `DATABASE_URL="file:./nginx_waf.db"`
+
+**Data is missing after migration**
+- Check migration log for errors: `cat backups/postgres-to-sqlite-*/migration.log`
+- Verify exported data: `cat backups/postgres-to-sqlite-*/postgres-export.json`
+- If data was exported but not imported, you can re-run just the import:
+  ```bash
+  cd apps/api
+  pnpm ts-node /path/to/backups/postgres-to-sqlite-*/import-sqlite-data.ts /path/to/backups/postgres-to-sqlite-*
+  ```
+
+**Performance issues**
+- SQLite is slower for large datasets with many concurrent writes
+- Recommended for deployments with < 100 domains and < 10 concurrent users
+- For larger deployments, consider keeping PostgreSQL
+
+## Performance Considerations
+
+### When to Use SQLite
+✅ **Good for:**
+- Small to medium deployments (< 100 domains)
+- Low concurrent user count (< 10 users)
+- Development and testing environments
+- Single-server deployments
+- Easy backup/restore requirements
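+
+For deployments near those upper bounds, SQLite's write-ahead logging (WAL) mode usually improves read concurrency noticeably. The migration script does not configure this; it is an optional tweak you can apply once while the backend is stopped (the setting persists in the database file):
+
+```bash
+sqlite3 apps/api/prisma/nginx_waf.db "PRAGMA journal_mode=WAL;"
+```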
+
+### When to Keep PostgreSQL
+⚠️ **Consider PostgreSQL if:**
+- Large deployment (> 100 domains)
+- High concurrent user count (> 10 users)
+- High-availability requirements
+- Multi-server/cluster deployments
+- Heavy write operations
+
+## Support
+
+If you encounter issues during migration:
+
+1. **Check the migration log:**
+   ```bash
+   cat /path/to/backups/postgres-to-sqlite-*/migration.log
+   ```
+
+2. **Review the backup:**
+   - Original .env: `backups/postgres-to-sqlite-*/.env.backup`
+   - Exported data: `backups/postgres-to-sqlite-*/postgres-export.json`
+
+3. **Community support:**
+   - GitHub Issues: https://github.com/TinyActive/nginx-love/issues
+   - Telegram: https://t.me/nginxlove
+
+4. **Always keep backups:**
+   - Keep the backup directory for at least 30 days
+   - Test the rollback procedure before deleting PostgreSQL data
+
+## FAQ
+
+**Q: Will this affect my running services?**
+A: Yes, the application will need to be restarted after migration. Plan for a brief maintenance window (5-15 minutes).
+
+**Q: Can I migrate back to PostgreSQL later?**
+A: Yes, you can roll back using the backup created during migration. However, any data created after migration will need to be manually exported.
+
+**Q: How long does migration take?**
+A: Typically 2-5 minutes for small databases (< 1000 records), up to 15 minutes for larger databases.
+
+**Q: Will my SSL certificates and configurations be preserved?**
+A: Yes, all data including SSL certificates, domain configurations, ModSecurity rules, and access lists are preserved.
+
+**Q: Do I need to reconfigure anything after migration?**
+A: No, all configurations are preserved. Just restart the services and verify everything is working.
+
+## File Locations
+
+After migration:
+- **SQLite Database:** `apps/api/prisma/nginx_waf.db`
+- **Backup Directory:** `backups/postgres-to-sqlite-YYYYMMDD_HHMMSS/`
+- **Migration Log:** `backups/postgres-to-sqlite-YYYYMMDD_HHMMSS/migration.log`
+- **Exported Data:** `backups/postgres-to-sqlite-YYYYMMDD_HHMMSS/postgres-export.json`
+- **Original .env:** `backups/postgres-to-sqlite-YYYYMMDD_HHMMSS/.env.backup`
diff --git a/docs/SQLITE_BUILD_FIXES_STATUS.md b/docs/SQLITE_BUILD_FIXES_STATUS.md
new file mode 100644
index 0000000..4d6bba6
--- /dev/null
+++ b/docs/SQLITE_BUILD_FIXES_STATUS.md
@@ -0,0 +1,194 @@
+# SQLite Migration - Build Fixes Status
+
+## Overview
+
+The PostgreSQL to SQLite migration required extensive TypeScript code changes to handle SQLite's limitations with enums, JSON fields, and arrays.
+
+## Progress
+
+**Build Errors**: 31 → 11 (65% complete)
+
+## Completed Fixes
+
+### 1. Enum Types (✅ COMPLETE)
+- Created `src/shared/types/enums.ts` with all 24 enum definitions
+- Updated imports in `account.repository.ts` and `auth.repository.ts`
+- Modified `createActivityLog` to accept string literals
+- Added type assertions in `users.repository.ts`
+
+### 2. Array Serialization (✅ COMPLETE)
+**Access Lists:**
+- `allowedIps`: String[] → String (JSON serialized)
+- Added helper functions: `serializeArray()`, `deserializeArray()`
+- Transform function for repository returns
+
+**Domains:**
+- `realIpCustomCidrs`: String[] → String (JSON serialized)
+
+**Account:**
+- `backupCodes`: String[] → String (JSON serialized)
+
+**Cluster/SSL:**
+- `sans`: String[] → String (JSON serialized)
+
+### 3. JSON Fields (✅ PARTIAL)
+**Backup Service:**
+- `metadata`: Object → String (JSON serialized)
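+
+All of the completed fixes above follow the same serialize-on-write / deserialize-on-read pattern. A minimal sketch using the `AccessList` model (illustrative only; the real repository signatures and required fields differ):
+
+```typescript
+import { PrismaClient } from '@prisma/client';
+
+const prisma = new PrismaClient();
+
+// Write path: the string array is JSON-serialized into the String column
+async function createAccessList(name: string, allowedIps: string[]) {
+  return prisma.accessList.create({
+    data: { name, allowedIps: JSON.stringify(allowedIps) },
+  });
+}
+
+// Read path: the stored JSON string is parsed back into an array for callers
+async function getAccessList(id: string) {
+  const row = await prisma.accessList.findUnique({ where: { id } });
+  if (!row) return null;
+  return { ...row, allowedIps: JSON.parse(row.allowedIps || '[]') as string[] };
+}
+```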
+### 4. Type Assertions (✅ COMPLETE)
+- All user repository methods
+- Cluster repository (partial)
+
+## Remaining Issues (11 errors)
+
+### 1. SSL Certificate Deserialization
+**Files affected:**
+- `domains/backup/backup.service.ts` (line 515)
+- `domains/cluster/cluster.repository.ts` (line 232)
+
+**Issue**: SSL certificates from database have `sans` as String, but code expects String[]
+
+**Fix needed**:
+```typescript
+// In SSL repository or service
+const deserializeSans = (sans: string | null): string[] => {
+  if (!sans) return [];
+  try {
+    return JSON.parse(sans);
+  } catch {
+    return [];
+  }
+};
+
+// Apply transformation after DB queries
+const sslCert = await prisma.sSLCertificate.findUnique(...);
+return {
+  ...sslCert,
+  sans: deserializeSans(sslCert.sans)
+};
+```
+
+### 2. Domain Nginx Config Service
+**File**: `domains/domains/services/nginx-config.service.ts` (line 413)
+
+**Issue**: `realIpCustomCidrs` is String but code calls `.forEach()`
+
+**Fix needed**:
+```typescript
+// Deserialize before use
+const cidrs = JSON.parse(domain.realIpCustomCidrs || '[]');
+cidrs.forEach((cidr: string) => {
+  // ...
+});
+```
+
+### 3. SSL Scheduler Service
+**File**: `domains/ssl/services/ssl-scheduler.service.ts` (lines 91, 94, 95)
+
+**Issue**: Trying to store String[] and Objects where Prisma expects String
+
+**Fix needed**:
+```typescript
+await prisma.sSLCertificate.update({
+  data: {
+    sans: JSON.stringify(sans), // Serialize array
+    subjectDetails: JSON.stringify(subjectDetails), // Serialize object
+    issuerDetails: JSON.stringify(issuerDetails), // Serialize object
+  }
+});
+```
+
+### 4. Cluster Repository Type Assertions
+**File**: `domains/cluster/cluster.repository.ts` (lines 12, 21, 68, 80)
+
+**Issue**: Return types need type assertions
+
+**Fix needed**:
+```typescript
+async findByName(name: string): Promise<SlaveNode | null> {
+  return prisma.slaveNode.findUnique({
+    where: { name }
+  }) as Promise<SlaveNode | null>;
+}
+```
+
+## Pattern for Fixes
+
+### For Arrays (String[])
+1. **Serialize** when writing to DB: `JSON.stringify(array || [])`
+2. **Deserialize** when reading from DB: `JSON.parse(str || '[]')`
+
+### For JSON Objects
+1. **Serialize** when writing: `JSON.stringify(object)`
+2. **Deserialize** when reading: `JSON.parse(str || '{}')`
+
+### For Enums
+1. **Store** as string (no change needed)
+2. **Type assert** on read if using custom types
+
+## Recommendations
+
+1. **Create Helper Module**: `src/utils/sqlite-helpers.ts`
+   ```typescript
+   export const serializeArray = (arr: string[]): string => JSON.stringify(arr || []);
+   export const deserializeArray = (str: string | null): string[] => {
+     try { return JSON.parse(str || '[]'); }
+     catch { return []; }
+   };
+
+   export const serializeJson = (obj: any): string => JSON.stringify(obj || {});
+   export const deserializeJson = <T>(str: string | null): T | null => {
+     try { return JSON.parse(str || 'null'); }
+     catch { return null; }
+   };
+   ```
+
+2. **Create Repository Transformers**: For each domain with arrays/JSON
+   ```typescript
+   const transformSSLCertificate = (cert: PrismaSSLCert): SSLCertificate => ({
+     ...cert,
+     sans: deserializeArray(cert.sans),
+     subjectDetails: deserializeJson(cert.subjectDetails),
+     issuerDetails: deserializeJson(cert.issuerDetails),
+   });
+   ```
+
+3. 
**Apply Consistently**: Use transformers in all repository methods that return data + +## Testing After Fixes + +Once all 11 errors are fixed: + +```bash +# Test build +cd apps/api && pnpm build + +# Run application +pnpm dev + +# Test key features: +# - Create/update access lists (array handling) +# - View SSL certificates (sans deserialization) +# - Domain configuration (realIpCustomCidrs) +# - Backup/restore (metadata JSON) +``` + +## Migration Compatibility + +All fixes maintain API compatibility: +- ✅ Input types unchanged (still accept arrays/objects) +- ✅ Output types unchanged (transformers convert back to arrays/objects) +- ✅ Only internal storage format changed (SQLite compatibility) + +## Estimated Effort + +**Remaining work**: ~1-2 hours for experienced developer + +**Files to modify**: 5 files +- backup.service.ts +- cluster.repository.ts +- nginx-config.service.ts +- ssl-scheduler.service.ts +- Create sqlite-helpers.ts + +**Complexity**: Low - mostly repetitive serialize/deserialize calls diff --git a/docs/SQLITE_FEATURE_VERIFICATION.md b/docs/SQLITE_FEATURE_VERIFICATION.md new file mode 100644 index 0000000..37a6156 --- /dev/null +++ b/docs/SQLITE_FEATURE_VERIFICATION.md @@ -0,0 +1,291 @@ +# SQLite Migration - Feature Verification Guide + +This document provides a comprehensive checklist to verify all features work correctly after migrating from PostgreSQL to SQLite. + +## Overview + +The migration to SQLite required converting JSON and array fields to strings. All affected repositories now include: +- **Serialization** - Converting objects/arrays to JSON strings when writing to database +- **Deserialization** - Converting JSON strings back to objects/arrays when reading from database + +## Modified Repositories + +### 1. Domains Repository ✅ +**File**: `src/domains/domains/domains.repository.ts` + +**Fields Converted**: +- `customLocations` (Json → String) +- `realIpCustomCidrs` (String[] → String) + +**Features to Test**: +- [ ] Create domain with custom locations +- [ ] Create domain with custom Real IP CIDR ranges +- [ ] Update domain custom locations +- [ ] Update domain Real IP settings +- [ ] List all domains (verify custom locations display correctly) +- [ ] View domain details + +### 2. SSL Repository ✅ +**File**: `src/domains/ssl/ssl.repository.ts` + +**Fields Converted**: +- `sans` (String[] → String) - Subject Alternative Names +- `subjectDetails` (Json → String) - Certificate subject info +- `issuerDetails` (Json → String) - Certificate issuer info + +**Features to Test**: +- [ ] Upload manual SSL certificate +- [ ] Issue auto SSL certificate (Let's Encrypt) +- [ ] View SSL certificate details (verify SANs display correctly) +- [ ] Renew SSL certificate +- [ ] List all SSL certificates +- [ ] View certificate subject/issuer details + +### 3. Access Lists Repository ✅ +**File**: `src/domains/access-lists/access-lists.repository.ts` + +**Fields Converted**: +- `allowedIps` (String[] → String) + +**Features to Test**: +- [ ] Create access list with IP whitelist +- [ ] Create access list with CIDR ranges +- [ ] Update access list IPs +- [ ] Delete access list +- [ ] List all access lists +- [ ] Apply access list to domain + +### 4. Account Repository ✅ +**File**: `src/domains/account/account.repository.ts` + +**Fields Converted**: +- `backupCodes` (String[] → String) + +**Features to Test**: +- [ ] Enable 2FA +- [ ] Generate backup codes +- [ ] Use backup code to login +- [ ] Regenerate backup codes +- [ ] View backup codes in profile + +### 5. 
Alerts Repository ✅ +**File**: `src/domains/alerts/alerts.repository.ts` + +**Fields Converted**: +- `config` (Json → String) - Email, Telegram config + +**Features to Test**: +- [ ] Create email notification channel +- [ ] Create Telegram notification channel +- [ ] Update notification channel config +- [ ] Test notification channel +- [ ] Create alert rule with notification +- [ ] Trigger alert and verify notification sent + +### 6. Backup Service ✅ +**File**: `src/domains/backup/backup.service.ts` + +**Fields Converted**: +- `metadata` (Json → String) + +**Features to Test**: +- [ ] Create manual backup +- [ ] Create scheduled backup +- [ ] List all backups (verify metadata displays) +- [ ] Restore from backup +- [ ] Download backup file +- [ ] Delete backup + +## Critical Test Scenarios + +### Scenario 1: Domain with Full Configuration +1. Create domain with: + - Upstreams + - Load balancer + - Custom locations (multiple location blocks) + - Real IP with custom CIDRs + - SSL certificate + - Access list +2. Verify all settings save correctly +3. Update each setting +4. Delete domain + +### Scenario 2: SSL Certificate Management +1. Upload manual SSL with multiple SANs +2. Verify certificate details show all SANs +3. Verify subject/issuer details display +4. Auto-renew certificate +5. Verify renewal preserves all details + +### Scenario 3: Access Control +1. Create access list with: + - Multiple IP addresses + - Multiple CIDR ranges + - Basic auth users +2. Apply to multiple domains +3. Update IP list +4. Test access from allowed/blocked IPs + +### Scenario 4: Notifications & Alerts +1. Create email notification channel +2. Create Telegram notification channel +3. Create alert rules for: + - Domain down + - SSL expiring + - High traffic +4. Trigger alerts +5. Verify notifications sent correctly + +### Scenario 5: Backup & Restore +1. Create full system backup +2. Make changes to system +3. Restore from backup +4. 
Verify all data restored:
+   - Domains with custom locations
+   - SSL certificates with SANs
+   - Access lists with IPs
+   - Notification channels with config
+
+## Verification Commands
+
+### Check Database File
+```bash
+# Verify SQLite database exists
+ls -lh apps/api/prisma/nginx_waf.db
+
+# Check database size
+du -h apps/api/prisma/nginx_waf.db
+```
+
+### Verify JSON Fields
+```bash
+# Connect to SQLite database
+sqlite3 apps/api/prisma/nginx_waf.db
+
+-- Check domains with custom locations
+SELECT id, name, customLocations FROM Domain WHERE customLocations IS NOT NULL LIMIT 5;
+
+-- Check SSL certificates with SANs
+SELECT id, commonName, sans FROM SSLCertificate LIMIT 5;
+
+-- Check access lists with IPs
+SELECT id, name, allowedIps FROM AccessList LIMIT 5;
+
+-- Check notification channels with config
+SELECT id, name, type, config FROM NotificationChannel LIMIT 5;
+
+-- Exit
+.quit
+```
+
+### Check Application Logs
+```bash
+# Monitor backend logs
+tail -f apps/api/logs/app.log
+
+# Check for JSON parse errors
+grep -i "json\|parse\|serialize" apps/api/logs/app.log
+```
+
+## Common Issues & Solutions
+
+### Issue 1: Empty Arrays/Objects Display as Strings
+**Symptom**: UI shows `"[]"` or `"{}"` instead of empty state
+
+**Cause**: Frontend not handling deserialized data correctly
+
+**Solution**: Check that API returns properly parsed data, not JSON strings
+
+### Issue 2: Cannot Save Custom Locations
+**Symptom**: Error when creating domain with custom locations
+
+**Cause**: Data not being serialized before database write
+
+**Solution**: Verify `domains.repository.ts` uses `JSON.stringify()` on create/update
+
+### Issue 3: SANs Not Displaying
+**Symptom**: SSL certificate shows empty SANs array
+
+**Cause**: Data not being deserialized when reading from database
+
+**Solution**: Verify `ssl.repository.ts` uses `transformSSLCertificate()` on all reads
+
+### Issue 4: Notification Config Lost
+**Symptom**: Notification channel loses email/Telegram config after save
+
+**Cause**: Config not being serialized/deserialized properly
+
+**Solution**: Verify `alerts.repository.ts` uses serialize/deserialize helpers
+
+## Migration Rollback
+
+If critical issues are found:
+
+1. **Stop Services**:
+   ```bash
+   sudo systemctl stop nginx-love-backend nginx-love-frontend
+   ```
+
+2. **Restore PostgreSQL Config**:
+   ```bash
+   cp backups/postgres-to-sqlite-*/.env.backup apps/api/.env
+   ```
+
+3. **Remove SQLite Database**:
+   ```bash
+   rm apps/api/prisma/nginx_waf.db
+   ```
+
+4. **Start PostgreSQL**:
+   ```bash
+   docker start nginx-love-postgres
+   ```
+
+5. 
**Restart Services**: + ```bash + sudo systemctl start nginx-love-backend nginx-love-frontend + ``` + +## Success Criteria + +Mark this migration as successful when: + +- [ ] All domain operations work (create, update, delete) +- [ ] SSL certificate management works (upload, renew, view) +- [ ] Access lists work (create, update, apply) +- [ ] 2FA and backup codes work +- [ ] Notification channels work (email, Telegram) +- [ ] Alerts trigger and send notifications +- [ ] Backups create and restore successfully +- [ ] ModSecurity rules apply correctly +- [ ] No JSON parse/serialize errors in logs +- [ ] Build completes with 0 errors +- [ ] All UI features display data correctly + +## Performance Benchmarks + +Compare before/after migration: + +| Operation | PostgreSQL | SQLite | Notes | +|-----------|-----------|---------|-------| +| List 100 domains | - | - | Should be similar | +| Create domain | - | - | Should be similar | +| SSL upload | - | - | Should be similar | +| Backup creation | - | - | May be faster | +| Full restore | - | - | May be faster | + +## Reporting Issues + +If you find issues: + +1. Document the exact steps to reproduce +2. Include error messages from logs +3. Note which feature is affected +4. Check if data is correctly stored in database (use sqlite3 commands above) +5. Report to GitHub issue or PR comments + +## Additional Resources + +- Main Migration Guide: `docs/MIGRATION_POSTGRES_TO_SQLITE.md` +- Build Fixes Status: `docs/SQLITE_BUILD_FIXES_STATUS.md` +- Prisma Schema: `apps/api/prisma/schema.prisma` diff --git a/scripts/deploy.sh b/scripts/deploy.sh index b6ef20a..ce309b1 100755 --- a/scripts/deploy.sh +++ b/scripts/deploy.sh @@ -21,13 +21,9 @@ PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" BACKEND_DIR="$PROJECT_DIR/apps/api" FRONTEND_DIR="$PROJECT_DIR/apps/web" LOG_FILE="/var/log/nginx-love-ui-deploy.log" +DB_DIR="$BACKEND_DIR/prisma" -# Database configuration -DB_CONTAINER_NAME="nginx-love-postgres" -DB_NAME="nginx_love_db" -DB_USER="nginx_love_user" -DB_PASSWORD=$(openssl rand -base64 32 | tr -d "=+/" | cut -c1-32) -DB_PORT=5432 +# Security configuration JWT_ACCESS_SECRET=$(openssl rand -base64 64 | tr -d "=+/" | cut -c1-64) JWT_REFRESH_SECRET=$(openssl rand -base64 64 | tr -d "=+/" | cut -c1-64) SESSION_SECRET=$(openssl rand -base64 64 | tr -d "=+/" | cut -c1-64) @@ -165,59 +161,15 @@ fi PKG_MANAGER="pnpm" log "✓ Package manager: ${PKG_MANAGER}" -# Step 2: Setup PostgreSQL with Docker -log "Step 2/8: Setting up PostgreSQL with Docker..." +# Step 2: Setup SQLite Database +log "Step 2/8: Setting up SQLite Database..." -# Stop and remove existing container if exists -if docker ps -a | grep -q "${DB_CONTAINER_NAME}"; then - log "Removing existing PostgreSQL container..." - docker stop "${DB_CONTAINER_NAME}" 2>/dev/null || true - docker rm "${DB_CONTAINER_NAME}" 2>/dev/null || true -fi - -# Remove old volume to ensure clean installation -if docker volume ls | grep -q nginx-love-postgres-data; then - log "Removing old PostgreSQL volume for clean installation..." - docker volume rm nginx-love-postgres-data 2>/dev/null || true -fi - -# Create Docker network if not exists -if ! docker network ls | grep -q nginx-love-network; then - docker network create nginx-love-network >> "$LOG_FILE" 2>&1 - log "✓ Docker network created" -fi - -# Start PostgreSQL container -log "Starting PostgreSQL container..." 
-docker run -d \
-    --name "${DB_CONTAINER_NAME}" \
-    --network nginx-love-network \
-    -e POSTGRES_DB="${DB_NAME}" \
-    -e POSTGRES_USER="${DB_USER}" \
-    -e POSTGRES_PASSWORD="${DB_PASSWORD}" \
-    -p 127.0.0.1:"${DB_PORT}":5432 \
-    -v nginx-love-postgres-data:/var/lib/postgresql/data \
-    --restart unless-stopped \
-    postgres:15-alpine >> "${LOG_FILE}" 2>&1 || error "Failed to start PostgreSQL container"
-
-# Wait for PostgreSQL to be ready
-log "Waiting for PostgreSQL to be ready..."
-sleep 5
-for i in {1..30}; do
-    if docker exec "${DB_CONTAINER_NAME}" pg_isready -U "${DB_USER}" > /dev/null 2>&1; then
-        log "✓ PostgreSQL is ready"
-        break
-    fi
-    if [ "${i}" -eq 30 ]; then
-        error "PostgreSQL failed to start"
-    fi
-    sleep 1
-done
-
-log "✓ PostgreSQL container started successfully"
-log "  • Database: ${DB_NAME}"
-log "  • User: ${DB_USER}"
-log "  • Port: ${DB_PORT}"
+# Create database directory if it doesn't exist
+mkdir -p "${DB_DIR}"
+log "✓ Database directory created at ${DB_DIR}"
+log "✓ SQLite database will be created at ${DB_DIR}/nginx_waf.db"
+log "  • No Docker container required"
+log "  • No PostgreSQL installation required"

# Step 3: Install Nginx + ModSecurity
log "Step 3/8: Installing Nginx + ModSecurity..."
@@ -247,8 +199,8 @@ cd "${BACKEND_DIR}"

# Create backend .env from .env.example (always create fresh)
log "Creating fresh backend .env from .env.example..."
cat > ".env" <<EOF
# [... backend .env contents elided in source; DATABASE_URL now points at file:./nginx_waf.db ...]
EOF

pnpm exec prisma generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client"
log "Running database migrations..."
pnpm exec prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations"

-# Force reseed database after fresh PostgreSQL install
+# Force reseed database after fresh installation
log "Seeding database..."
rm -f .seeded  # Remove marker to force reseed
pnpm prisma:seed >> "$LOG_FILE" 2>&1 || warn "Failed to seed database"
@@ -381,7 +333,7 @@ log "Setting up systemd services..."
cat > /etc/systemd/system/nginx-love-backend.service <<EOF
# [... systemd unit contents and the remaining deploy.sh steps elided in source ...]
EOF
cat > /root/.nginx-love-credentials <<EOF
# [... generated credentials summary elided in source ...]
EOF

diff --git a/scripts/migrate-postgres-to-sqlite.sh b/scripts/migrate-postgres-to-sqlite.sh
new file mode 100755
--- /dev/null
+++ b/scripts/migrate-postgres-to-sqlite.sh
+# [... script header, logging helpers, prerequisite checks, BACKUP_DIR creation, and the .env backup steps elided in source ...]
+
+cat > "$BACKUP_DIR/export-postgres-data.ts" <<'EOF'
+import { PrismaClient } from '@prisma/client';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const prisma = new PrismaClient();
+
+async function exportData() {
+  console.log('📤 Exporting data from PostgreSQL...');
+
+  const exportDir = process.argv[2] || './export';
+  if (!fs.existsSync(exportDir)) {
+    fs.mkdirSync(exportDir, { recursive: true });
+  }
+
+  const data: any = {};
+
+  // Export all tables in dependency order
+  console.log('Exporting users...');
+  data.users = await prisma.user.findMany({
+    include: {
+      profile: true,
+      twoFactor: true,
+    }
+  });
+
+  console.log('Exporting activity logs...');
+  data.activityLogs = await prisma.activityLog.findMany();
+
+  console.log('Exporting refresh tokens...');
+  data.refreshTokens = await prisma.refreshToken.findMany();
+
+  console.log('Exporting user sessions...');
+  data.userSessions = await prisma.userSession.findMany();
+
+  console.log('Exporting domains...');
+  data.domains = await prisma.domain.findMany({
+    include: {
+      upstreams: true,
+      loadBalancer: true,
+      sslCertificate: true,
+    }
+  });
+
+  console.log('Exporting ModSecurity CRS rules...');
+  data.modSecCRSRules = await prisma.modSecCRSRule.findMany();
+
+  console.log('Exporting ModSecurity custom rules...');
+  data.modSecRules = await prisma.modSecRule.findMany();
+
+  console.log('Exporting nginx configs...');
+  data.nginxConfigs = await prisma.nginxConfig.findMany();
+
+  console.log('Exporting installation status...');
+  data.installationStatus = await prisma.installationStatus.findMany();
+
+  console.log('Exporting notification channels...');
+  data.notificationChannels = await prisma.notificationChannel.findMany();
+
+  console.log('Exporting alert rules...');
+  data.alertRules = await prisma.alertRule.findMany({
+    include: {
+      channels: {
+        include: {
+          channel: true,
+        }
+      }
+    }
+  });
+
+  console.log('Exporting alert history...');
+  data.alertHistory = await prisma.alertHistory.findMany();
+
+  console.log('Exporting ACL rules...');
+  data.aclRules = await prisma.aclRule.findMany();
+
+  console.log('Exporting access lists...');
+  data.accessLists = await prisma.accessList.findMany({
+    include: {
+      authUsers: true,
+      domains: true,
+    }
+  });
+
+  console.log('Exporting performance metrics...');
+  data.performanceMetrics = await prisma.performanceMetric.findMany();
+
+  console.log('Exporting backup schedules...');
+  data.backupSchedules = await prisma.backupSchedule.findMany({
+    include: {
+      backups: true,
+    }
+  });
+
+  console.log('Exporting slave nodes...');
+  data.slaveNodes = await prisma.slaveNode.findMany();
+
+  console.log('Exporting system config...');
+  data.systemConfigs = await prisma.systemConfig.findMany();
+
+  console.log('Exporting sync logs...');
+  data.syncLogs = await prisma.syncLog.findMany();
+
+  console.log('Exporting config versions...');
+  data.configVersions = await prisma.configVersion.findMany();
+
+  console.log('Exporting network load balancers...');
+  data.networkLoadBalancers = await prisma.networkLoadBalancer.findMany({
+    include: {
+      upstreams: true,
+      healthChecks: true,
+    }
+  });
+
+  // Save to JSON file
+  const exportFile = path.join(exportDir, 'postgres-export.json');
+  fs.writeFileSync(exportFile, JSON.stringify(data, null, 2));
+
+  console.log(`✅ Data exported to ${exportFile}`);
+  console.log(`📊 Export size: ${JSON.stringify(data).length} bytes`);
+
+  await prisma.$disconnect();
+}
+
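+// Note: the export is read-only; nothing is written back to PostgreSQL, so this
+// step can be re-run safely if the import below fails. Related records
+// (profiles, upstreams, SSL certificates, alert channels) ride along via
+// `include` and are re-created table by table by the import script.
+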
+exportData().catch((error) => {
+  console.error('❌ Export failed:', error);
+  process.exit(1);
+});
+EOF
+
+# Run export
+info "Running data export from PostgreSQL..."
+pnpm ts-node "$BACKUP_DIR/export-postgres-data.ts" "$BACKUP_DIR" >> "$LOG_FILE" 2>&1 || error "Failed to export data from PostgreSQL"
+
+log "✓ Data exported successfully to $BACKUP_DIR/postgres-export.json"
+
+# Step 4: Update Prisma schema and generate new SQLite client
+log "Step 4/6: Preparing SQLite database..."
+
+# Update DATABASE_URL in .env
+SQLITE_DB_PATH="$BACKEND_DIR/prisma/nginx_waf.db"
+sed -i.migration_backup "s|^DATABASE_URL=.*|DATABASE_URL=\"file:./nginx_waf.db\"|g" .env
+
+log "✓ Updated DATABASE_URL to use SQLite"
+
+# Generate Prisma client and run migrations
+info "Generating Prisma client for SQLite..."
+pnpm prisma generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client"
+
+info "Running SQLite migrations..."
+pnpm prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations"
+
+log "✓ SQLite database created and migrated"
+
+# Step 5: Import data into SQLite
+log "Step 5/6: Importing data into SQLite..."
+
+cat > "$BACKUP_DIR/import-sqlite-data.ts" <<'EOF'
+import { PrismaClient } from '@prisma/client';
+import * as fs from 'fs';
+import * as path from 'path';
+
+const prisma = new PrismaClient();
+
+// Helper: recursively convert array and JSON fields to strings for SQLite.
+// Keys listed here hold JSON data and are stored as JSON strings in SQLite.
+const JSON_STRING_KEYS = new Set(['config', 'metadata', 'configData', 'customLocations']);
+
+function convertEnumsToStrings(data: any): any {
+  if (Array.isArray(data)) {
+    return data.map(item => convertEnumsToStrings(item));
+  } else if (data && typeof data === 'object') {
+    const converted: any = {};
+    for (const [key, value] of Object.entries(data)) {
+      // JSON columns (objects or arrays) become JSON strings for SQLite
+      if (value !== null && typeof value === 'object' &&
+          (JSON_STRING_KEYS.has(key) || key.endsWith('Details'))) {
+        converted[key] = JSON.stringify(value);
+      }
+      // Scalar string lists become JSON strings (covers empty arrays too)
+      else if (Array.isArray(value) && value.every(v => typeof v === 'string')) {
+        converted[key] = JSON.stringify(value);
+      }
+      // Arrays of related records are converted element by element
+      else if (Array.isArray(value)) {
+        converted[key] = value.map(v => convertEnumsToStrings(v));
+      }
+      // Nested relation objects are converted recursively
+      else if (value && typeof value === 'object' && !(value instanceof Date)) {
+        converted[key] = convertEnumsToStrings(value);
+      } else {
+        converted[key] = value;
+      }
+    }
+    return converted;
+  }
+  return data;
+}
+
+async function importData() {
+  console.log('📥 Importing data into SQLite...');
+
+  const importFile = path.join(process.argv[2] || './export', 'postgres-export.json');
+
+  if (!fs.existsSync(importFile)) {
+    throw new Error(`Import file not found: ${importFile}`);
+  }
+
+  const rawData = JSON.parse(fs.readFileSync(importFile, 'utf-8'));
+  const data = convertEnumsToStrings(rawData);
+
+  // Import in dependency order
+
+  // 1. Users (without relations first)
+  console.log('Importing users...');
+  for (const user of data.users) {
+    const { profile, twoFactor, activities, refreshTokens, sessions, ...userData } = user;
+    await prisma.user.create({ data: userData });
+  }
+
+  // 2. User profiles
+  console.log('Importing user profiles...');
+  for (const user of data.users) {
+    if (user.profile) {
+      await prisma.userProfile.create({ data: user.profile });
+    }
+  }
+
+  // 3. Two-factor auth
+  console.log('Importing two-factor auth...');
+  for (const user of data.users) {
+    if (user.twoFactor) {
+      await prisma.twoFactorAuth.create({ data: user.twoFactor });
+    }
+  }
+
+  // 4. Activity logs
+  console.log('Importing activity logs...');
+  for (const log of data.activityLogs) {
+    await prisma.activityLog.create({ data: log });
+  }
+
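+  // Performance note (a suggestion, not part of the original script): every
+  // create() above and below is an individual write. For large tables the
+  // per-table loops could be wrapped in prisma.$transaction([...]) to batch
+  // the inserts and speed up the import.
+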
+  // 5. Refresh tokens
+  console.log('Importing refresh tokens...');
+  for (const token of data.refreshTokens) {
+    await prisma.refreshToken.create({ data: token });
+  }
+
+  // 6. User sessions
+  console.log('Importing user sessions...');
+  for (const session of data.userSessions) {
+    await prisma.userSession.create({ data: session });
+  }
+
+  // 7. Domains (without relations)
+  console.log('Importing domains...');
+  for (const domain of data.domains) {
+    const { upstreams, loadBalancer, sslCertificate, modsecCRSRules, modsecRules, accessLists, ...domainData } = domain;
+    await prisma.domain.create({ data: domainData });
+  }
+
+  // 8. Upstreams
+  console.log('Importing upstreams...');
+  for (const domain of data.domains) {
+    if (domain.upstreams) {
+      for (const upstream of domain.upstreams) {
+        await prisma.upstream.create({ data: upstream });
+      }
+    }
+  }
+
+  // 9. Load balancer configs
+  console.log('Importing load balancer configs...');
+  for (const domain of data.domains) {
+    if (domain.loadBalancer) {
+      await prisma.loadBalancerConfig.create({ data: domain.loadBalancer });
+    }
+  }
+
+  // 10. SSL certificates
+  console.log('Importing SSL certificates...');
+  for (const domain of data.domains) {
+    if (domain.sslCertificate) {
+      await prisma.sSLCertificate.create({ data: domain.sslCertificate });
+    }
+  }
+
+  // 11. ModSecurity CRS rules
+  console.log('Importing ModSecurity CRS rules...');
+  for (const rule of data.modSecCRSRules) {
+    await prisma.modSecCRSRule.create({ data: rule });
+  }
+
+  // 12. ModSecurity custom rules
+  console.log('Importing ModSecurity custom rules...');
+  for (const rule of data.modSecRules) {
+    await prisma.modSecRule.create({ data: rule });
+  }
+
+  // 13. Nginx configs
+  console.log('Importing nginx configs...');
+  for (const config of data.nginxConfigs) {
+    await prisma.nginxConfig.create({ data: config });
+  }
+
+  // 14. Installation status
+  console.log('Importing installation status...');
+  for (const status of data.installationStatus) {
+    await prisma.installationStatus.create({ data: status });
+  }
+
+  // 15. Notification channels
+  console.log('Importing notification channels...');
+  for (const channel of data.notificationChannels) {
+    await prisma.notificationChannel.create({ data: channel });
+  }
+
+  // 16. Alert rules (without relations)
+  console.log('Importing alert rules...');
+  for (const rule of data.alertRules) {
+    const { channels, ...ruleData } = rule;
+    await prisma.alertRule.create({ data: ruleData });
+  }
+
+  // 17. Alert rule channels
+  console.log('Importing alert rule channels...');
+  for (const rule of data.alertRules) {
+    if (rule.channels) {
+      for (const ruleChannel of rule.channels) {
+        // Drop the nested channel record pulled in by the export's `include`;
+        // only the join row is created here (channels were imported in step 15)
+        const { channel, ...linkData } = ruleChannel;
+        await prisma.alertRuleChannel.create({ data: linkData });
+      }
+    }
+  }
+
+  // 18. Alert history
+  console.log('Importing alert history...');
+  for (const alert of data.alertHistory) {
+    await prisma.alertHistory.create({ data: alert });
+  }
+
+  // 19. ACL rules
+  console.log('Importing ACL rules...');
+  for (const rule of data.aclRules) {
+    await prisma.aclRule.create({ data: rule });
+  }
+
+  // 20. Access lists (without relations)
+  console.log('Importing access lists...');
+  for (const list of data.accessLists) {
+    const { authUsers, domains, ...listData } = list;
+    await prisma.accessList.create({ data: listData });
+  }
+
+  // 21. 
Access list auth users + console.log('Importing access list auth users...'); + for (const list of data.accessLists) { + if (list.authUsers) { + for (const user of list.authUsers) { + await prisma.accessListAuthUser.create({ data: user }); + } + } + } + + // 22. Access list domains + console.log('Importing access list domains...'); + for (const list of data.accessLists) { + if (list.domains) { + for (const domain of list.domains) { + await prisma.accessListDomain.create({ data: domain }); + } + } + } + + // 23. Performance metrics + console.log('Importing performance metrics...'); + for (const metric of data.performanceMetrics) { + await prisma.performanceMetric.create({ data: metric }); + } + + // 24. Backup schedules (without relations) + console.log('Importing backup schedules...'); + for (const schedule of data.backupSchedules) { + const { backups, ...scheduleData } = schedule; + await prisma.backupSchedule.create({ data: scheduleData }); + } + + // 25. Backup files + console.log('Importing backup files...'); + for (const schedule of data.backupSchedules) { + if (schedule.backups) { + for (const backup of schedule.backups) { + await prisma.backupFile.create({ data: backup }); + } + } + } + + // 26. Slave nodes + console.log('Importing slave nodes...'); + for (const node of data.slaveNodes) { + await prisma.slaveNode.create({ data: node }); + } + + // 27. System config + console.log('Importing system config...'); + for (const config of data.systemConfigs) { + await prisma.systemConfig.create({ data: config }); + } + + // 28. Sync logs + console.log('Importing sync logs...'); + for (const log of data.syncLogs) { + await prisma.syncLog.create({ data: log }); + } + + // 29. Config versions + console.log('Importing config versions...'); + for (const version of data.configVersions) { + await prisma.configVersion.create({ data: version }); + } + + // 30. Network load balancers (without relations) + console.log('Importing network load balancers...'); + for (const nlb of data.networkLoadBalancers) { + const { upstreams, healthChecks, ...nlbData } = nlb; + await prisma.networkLoadBalancer.create({ data: nlbData }); + } + + // 31. NLB upstreams + console.log('Importing NLB upstreams...'); + for (const nlb of data.networkLoadBalancers) { + if (nlb.upstreams) { + for (const upstream of nlb.upstreams) { + await prisma.nLBUpstream.create({ data: upstream }); + } + } + } + + // 32. NLB health checks + console.log('Importing NLB health checks...'); + for (const nlb of data.networkLoadBalancers) { + if (nlb.healthChecks) { + for (const check of nlb.healthChecks) { + await prisma.nLBHealthCheck.create({ data: check }); + } + } + } + + console.log('✅ Data imported successfully into SQLite'); + + await prisma.$disconnect(); +} + +importData().catch((error) => { + console.error('❌ Import failed:', error); + console.error('Stack trace:', error.stack); + process.exit(1); +}); +EOF + +# Run import +info "Running data import into SQLite..." +pnpm ts-node "$BACKUP_DIR/import-sqlite-data.ts" "$BACKUP_DIR" >> "$LOG_FILE" 2>&1 || { + error "Failed to import data into SQLite. Restoring PostgreSQL configuration..." + cp "$BACKUP_DIR/.env.backup" .env + exit 1 +} + +log "✓ Data imported successfully into SQLite" + +# Step 6: Verification and cleanup +log "Step 6/6: Verification and cleanup..." + +# Verify SQLite database +if [ ! 
-f "$SQLITE_DB_PATH" ]; then + error "SQLite database file not found at $SQLITE_DB_PATH" +fi + +SQLITE_SIZE=$(du -h "$SQLITE_DB_PATH" | cut -f1) +log "✓ SQLite database created: $SQLITE_SIZE" + +# Final summary +log "" +log "==================================" +log "Migration Completed Successfully!" +log "==================================" +log "" +log "📋 Summary:" +log " • Old database: PostgreSQL" +log " • New database: SQLite ($SQLITE_SIZE)" +log " • Database location: $SQLITE_DB_PATH" +log " • Backup location: $BACKUP_DIR" +log "" +log "📝 Next Steps:" +log " 1. Test the application with SQLite database" +log " 2. If everything works, you can remove PostgreSQL:" +log " - Stop PostgreSQL container: docker stop nginx-love-postgres" +log " - Remove PostgreSQL container: docker rm nginx-love-postgres" +log " - Remove PostgreSQL volume: docker volume rm nginx-love-postgres-data" +log " 3. Keep backup for safety: $BACKUP_DIR" +log "" +log "⚠️ Important:" +log " • Original .env backed up to: $BACKUP_DIR/.env.backup" +log " • PostgreSQL export saved to: $BACKUP_DIR/postgres-export.json" +log " • Migration log saved to: $LOG_FILE" +log "" +log "🔄 To rollback (if needed):" +log " 1. Stop the application" +log " 2. Restore .env: cp $BACKUP_DIR/.env.backup $BACKEND_DIR/.env" +log " 3. Delete SQLite database: rm $SQLITE_DB_PATH" +log " 4. Start PostgreSQL container" +log " 5. Restart the application" +log "" +log "Migration completed at: $(date)" +log "==================================" diff --git a/scripts/quickstart.sh b/scripts/quickstart.sh index a4de69f..7a3077b 100755 --- a/scripts/quickstart.sh +++ b/scripts/quickstart.sh @@ -17,52 +17,18 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" PROJECT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" BACKEND_DIR="${PROJECT_DIR}/apps/api" FRONTEND_DIR="${PROJECT_DIR}/apps/web" - -# Database config -DB_NAME="nginx_love_db" -DB_USER="nginx_love_user" -DB_PASSWORD="dev_password_123" -DB_PORT=5432 +DB_DIR="${BACKEND_DIR}/prisma" echo "🚀 Nginx Love UI - Quick Start" echo "================================" echo "" -# Check Docker (optional) -USE_DOCKER=false -if command -v docker &> /dev/null && command -v docker-compose &> /dev/null; then - read -p "Use Docker for PostgreSQL? (y/n, default: y): " USE_DOCKER_INPUT - if [ "${USE_DOCKER_INPUT}" != "n" ]; then - USE_DOCKER=true - fi -fi +# Setup SQLite Database +echo "💾 Using SQLite Database..." +echo " Database will be created at: ${DB_DIR}/dev.db" +echo " No PostgreSQL/Docker required!" -# Setup PostgreSQL -if [ "${USE_DOCKER}" = true ]; then - echo "🐳 Starting PostgreSQL with Docker..." - - # Stop existing container if any - docker stop nginx-love-postgres 2>/dev/null || true - docker rm nginx-love-postgres 2>/dev/null || true - - # Start PostgreSQL - docker run -d \ - --name nginx-love-postgres \ - -e POSTGRES_DB="${DB_NAME}" \ - -e POSTGRES_USER="${DB_USER}" \ - -e POSTGRES_PASSWORD="${DB_PASSWORD}" \ - -p "${DB_PORT}":5432 \ - postgres:15-alpine > /dev/null - - echo "✅ PostgreSQL started in Docker" - echo " Waiting for database to be ready..." - sleep 3 - - DATABASE_URL="postgresql://${DB_USER}:${DB_PASSWORD}@localhost:${DB_PORT}/${DB_NAME}?schema=public" -else - echo "📋 Please ensure PostgreSQL is running and update DATABASE_URL in backend/.env" - DATABASE_URL=${DATABASE_URL:-"postgresql://user:password@localhost:5432/nginx_love_db?schema=public"} -fi +DATABASE_URL="file:./dev.db" # Create backend .env from .env.example if [ ! 
-f "${BACKEND_DIR}/.env" ]; then @@ -140,11 +106,7 @@ echo " Backend: tail -f /tmp/backend.log" echo " Frontend: tail -f /tmp/frontend.log" echo "" echo "🛑 Stop:" -if [ "${USE_DOCKER}" = true ]; then - echo " kill ${BACKEND_PID} ${FRONTEND_PID} && docker stop nginx-love-postgres" -else - echo " kill ${BACKEND_PID} ${FRONTEND_PID}" -fi +echo " kill ${BACKEND_PID} ${FRONTEND_PID}" echo "" # Wait for services to start @@ -254,11 +216,7 @@ cleanup() { done fi - # Stop Docker PostgreSQL if used - if [ "${USE_DOCKER}" = true ]; then - docker stop nginx-love-postgres 2>/dev/null && echo "✅ PostgreSQL stopped" - fi - + echo "💾 SQLite database preserved at: ${DB_DIR}/dev.db" echo "👋 Goodbye!" exit 0 } diff --git a/scripts/update.sh b/scripts/update.sh index 8ec6282..f56bbb3 100644 --- a/scripts/update.sh +++ b/scripts/update.sh @@ -21,9 +21,7 @@ PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" BACKEND_DIR="$PROJECT_DIR/apps/api" FRONTEND_DIR="$PROJECT_DIR/apps/web" LOG_FILE="/var/log/nginx-love-ui-update.log" - -# Database configuration -DB_CONTAINER_NAME="nginx-love-postgres" +DB_DIR="$BACKEND_DIR/prisma" # Logging functions log() { @@ -61,10 +59,7 @@ if ! systemctl list-unit-files | grep -q nginx-love-frontend.service; then error "Frontend service not found. Please run deploy.sh first." fi -# Check if database container exists -if ! docker ps -a | grep -q "${DB_CONTAINER_NAME}"; then - error "Database container '${DB_CONTAINER_NAME}' not found. Please run deploy.sh first." -fi +log "✓ Services check passed" # Step 1: Check prerequisites log "Step 1/6: Checking prerequisites..." @@ -125,13 +120,6 @@ pnpm install >> "${LOG_FILE}" 2>&1 || error "Failed to update monorepo dependenc cd "${BACKEND_DIR}" -# Start database if not running -if ! docker ps | grep -q "${DB_CONTAINER_NAME}" 2>/dev/null; then - log "Starting database container..." - docker start "${DB_CONTAINER_NAME}" 2>/dev/null || warn "Could not start database container" - sleep 3 -fi - # Generate Prisma client log "Generating Prisma client..." pnpm prisma generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client" @@ -182,13 +170,6 @@ log "✓ Frontend build completed" # Step 5: Restart services log "Step 5/6: Starting services..." -# Database should already be running from Step 3, just verify -if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then - error "Database container stopped unexpectedly. Please check Docker status." 
-else
-    log "✓ Database container is running"
-fi
-
# Start backend service
systemctl start nginx-love-backend.service || error "Failed to start backend service"
sleep 3
@@ -290,24 +271,26 @@ log ""
log "📋 Updated Components:"
log "  • Backend API: Rebuilt and restarted"
log "  • Frontend UI: Rebuilt and restarted"
-log "  • Database: Migrations applied, missing data created (existing data preserved)"
+log "  • Database: SQLite migrations applied (${DB_DIR}/nginx_waf.db)"
log ""
log "🌐 Services Status:"
log "  • Backend API: http://${PUBLIC_IP}:3001"
log "  • Frontend UI: http://${PUBLIC_IP}:8080"
-log "  • Database: Running in Docker container"
+log "  • Database: SQLite file at ${DB_DIR}/nginx_waf.db"
log ""
log "📝 Manage Services:"
log "  Backend:  systemctl {start|stop|restart|status} nginx-love-backend"
log "  Frontend: systemctl {start|stop|restart|status} nginx-love-frontend"
-log "  Database: docker {start|stop|restart} ${DB_CONTAINER_NAME}"
log ""
log "📊 View Logs:"
log "  Backend:  tail -f /var/log/nginx-love-backend.log"
log "  Frontend: tail -f /var/log/nginx-love-frontend.log"
-log "  Database: docker logs -f ${DB_CONTAINER_NAME}"
log "  Update:   tail -f ${LOG_FILE}"
log ""
+log "💾 Database Backup (stop the backend first for a consistent copy):"
+log "  Backup:  cp ${DB_DIR}/nginx_waf.db ${DB_DIR}/nginx_waf.db.backup"
+log "  Restore: cp ${DB_DIR}/nginx_waf.db.backup ${DB_DIR}/nginx_waf.db"
+log ""
log "🔐 Access the portal at: http://${PUBLIC_IP}:8080"
log ""
log "Update log saved to: ${LOG_FILE}"