From fd132102c2dfb094a453b8001f555c49918a2f28 Mon Sep 17 00:00:00 2001 From: Daniella Alamo Date: Sun, 22 Mar 2026 20:09:54 +0100 Subject: [PATCH] feat(phase-3): RabbitMQ plan sync, CI, Docker hardening, search resilience - Add AMQP publisher/consumer, DLQ topology, worker service, POST /jobs/plan-sync - GitHub Actions: PHPUnit unit + Postgres integration tests; MIT LICENSE - Dev image: PHP_BASE_IMAGE (ECR mirror), .dockerignore, Make ready/pull-base - Search cache: epoch invalidation, stampede lock; provider client timeouts/retry/circuit - PlanListParser: suppress libxml noise on invalid XML; php-amqplib dependency Made-with: Cursor --- .dockerignore | 9 + .env.example | 31 +- .github/workflows/ci.yml | 75 ++++ LICENSE | 21 + Makefile | 69 ++- README.md | 397 ++++++++++++------ composer.json | 3 +- composer.lock | 312 +++++++++++++- docker/Dockerfile | 33 +- docker/docker-compose.yml | 41 +- public/index.php | 14 +- scripts/enqueue-plan-sync.php | 18 + scripts/rabbitmq-consume-plan-sync.php | 45 ++ scripts/sync-plans.php | 60 +-- .../Port/PlanSyncJobPublisherPort.php | 14 + .../Service/SearchEventsService.php | 46 +- .../RedisSearchCacheInvalidationAdapter.php | 2 +- .../Bootstrap/SyncPlansServiceFactory.php | 75 ++++ .../External/Provider/EventProviderClient.php | 33 +- .../External/Xml/PlanListParser.php | 6 +- .../Controller/PlanSyncJobsController.php | 38 ++ .../Http/Router/RoutesLoader.php | 8 +- .../RabbitMq/AmqpPlanSyncConsumer.php | 116 +++++ .../RabbitMq/AmqpPlanSyncJobPublisher.php | 137 ++++++ .../Persistence/Cache/RedisCache.php | 31 ++ .../Repository/DoctrinePlanRepository.php | 6 +- 26 files changed, 1389 insertions(+), 251 deletions(-) create mode 100644 .dockerignore create mode 100644 .github/workflows/ci.yml create mode 100644 LICENSE create mode 100644 scripts/enqueue-plan-sync.php create mode 100644 scripts/rabbitmq-consume-plan-sync.php create mode 100644 src/events/Application/Port/PlanSyncJobPublisherPort.php create mode 100644 
src/events/Infrastructure/Bootstrap/SyncPlansServiceFactory.php create mode 100644 src/events/Infrastructure/Http/Controller/PlanSyncJobsController.php create mode 100644 src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncConsumer.php create mode 100644 src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncJobPublisher.php diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..769254c --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +.git +.github +.idea +.vscode +*.md +docs +tests +.env +.env.* diff --git a/.env.example b/.env.example index 544d74a..8808382 100644 --- a/.env.example +++ b/.env.example @@ -5,22 +5,49 @@ DB_DATABASE=events_db DB_USERNAME=events_user DB_PASSWORD=events_pass +# Docker image build (optional) — default uses AWS Public ECR mirror of official php (same as Hub, different CDN). +# If pulls still fail, try one of: +# PHP_BASE_IMAGE=mirror.gcr.io/library/php:8.2-fpm-alpine +# PHP_BASE_IMAGE=php:8.2-fpm-alpine +# PHP_BASE_IMAGE=public.ecr.aws/docker/library/php:8.2-fpm-alpine + # Redis REDIS_HOST=redis REDIS_PORT=6379 -# RabbitMQ +# RabbitMQ (async plan sync — see README) RABBITMQ_HOST=rabbitmq RABBITMQ_PORT=5672 RABBITMQ_USER=events_user RABBITMQ_PASS=events_pass +# Default vhost "/" — set explicitly if your broker uses another vhost +# RABBITMQ_VHOST=/ +# Optional overrides (defaults match AmqpPlanSyncJobPublisher constants): +# RABBITMQ_EXCHANGE=ticketbridge +# RABBITMQ_DLX_EXCHANGE=ticketbridge.dlx +# RABBITMQ_SYNC_QUEUE=ticketbridge.sync.plans +# RABBITMQ_SYNC_ROUTING_KEY=sync.plans +# RABBITMQ_SYNC_DLQ=ticketbridge.sync.plans.dlq +# RABBITMQ_SYNC_DLQ_ROUTING_KEY=sync.plans.dlq +# RABBITMQ_CONN_TIMEOUT=5 +# RABBITMQ_IO_TIMEOUT=10 +# RABBITMQ_CONSUMER_IO_TIMEOUT=130 +# RABBITMQ_HEARTBEAT=30 # Provider feed: local XML from tests/Fixtures (default) OR HTTP if you clear this path PROVIDER_FIXTURE_PATH=tests/Fixtures/provider_responses/response_1.xml # Used only when PROVIDER_FIXTURE_PATH is empty: 
PROVIDER_URL=https://your-provider.example/api/events + +# HTTP client (sync only — /search does not call the provider) EVENT_PROVIDER_TIMEOUT=10 +EVENT_PROVIDER_CONNECT_TIMEOUT=5 EVENT_PROVIDER_RETRY_ATTEMPTS=3 +EVENT_PROVIDER_RETRY_BASE_MS=500 + +# Circuit breaker (sync only; state stored in Redis) +EVENT_PROVIDER_CB_FAILURES=5 +EVENT_PROVIDER_CB_OPEN_SECONDS=60 -# Cache Configuration +# /search response cache (TTL per key; invalidation bumps epoch — see README) CACHE_SEARCH_TTL=60 \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..906d9c1 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,75 @@ +name: CI + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +jobs: + unit: + name: PHPUnit (unit) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: shivammathur/setup-php@v2 + with: + php-version: "8.2" + extensions: dom, mbstring, zip + coverage: none + + - name: Install Composer dependencies + run: composer install --no-interaction --prefer-dist + + - name: Run unit tests + run: ./vendor/bin/phpunit tests/Unit --testdox + + integration-database: + name: PHPUnit (integration / DB) + runs-on: ubuntu-latest + services: + postgres: + image: postgres:15-alpine + env: + POSTGRES_USER: events_user + POSTGRES_PASSWORD: events_pass + POSTGRES_DB: events_db + ports: + - 5432:5432 + options: >- + --health-cmd "pg_isready -U events_user -d events_db" + --health-interval 5s + --health-timeout 5s + --health-retries 10 + + steps: + - uses: actions/checkout@v4 + + - uses: shivammathur/setup-php@v2 + with: + php-version: "8.2" + extensions: dom, mbstring, pdo_pgsql, zip + coverage: none + + - name: Install PostgreSQL client + run: sudo apt-get update && sudo apt-get install -y postgresql-client + + - name: Apply schema + env: + PGPASSWORD: events_pass + run: | + psql -h 127.0.0.1 -p 5432 -U events_user -d events_db -f database/schema.sql + 
psql -h 127.0.0.1 -p 5432 -U events_user -d events_db -f database/migrations/002_zones_available_seats.sql + + - name: Install Composer dependencies + run: composer install --no-interaction --prefer-dist + + - name: Run database integration tests + env: + DB_HOST: 127.0.0.1 + DB_PORT: "5432" + DB_DATABASE: events_db + DB_USERNAME: events_user + DB_PASSWORD: events_pass + run: ./vendor/bin/phpunit tests/Integration/Database --testdox diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..a4c392c --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 TicketBridge contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Makefile b/Makefile index 733f891..c3c0e02 100644 --- a/Makefile +++ b/Makefile @@ -1,26 +1,66 @@ -.PHONY: install test run sync migrate cache-clear stop clean help +.PHONY: install test test-all test-integration ready wait-ready run sync migrate cache-clear stop clean help consume-sync enqueue-plan-sync pull-base help: - @echo "make install - Setup" - @echo "make run - Start" - @echo "make test - Unit tests" - @echo "make test-integration - DB integration tests (Docker app)" - @echo "make stop - Stop" + @echo "make pull-base - docker pull PHP base (default: AWS Public ECR mirror of official php)" + @echo "make ready - install + run full test suite (best first run after clone)" + @echo "make install - composer + compose up + wait for Postgres + migrate + sync" + @echo "make test-all - all PHPUnit tests inside app container (needs stack up)" + @echo "make test - unit tests only (inside app container)" + @echo "make test-integration - DB integration tests (inside app container)" + @echo "make run - Start Docker stack" + @echo "make migrate - Apply schema + migrations (Postgres in compose)" + @echo "make sync - Run provider sync inside app container (inline)" + @echo "make enqueue-plan-sync - Publish one async plan-sync job (RabbitMQ)" + @echo "make consume-sync - Run plan-sync consumer in foreground (worker)" + @echo "make stop - Stop" + +# Wait until Postgres accepts connections (avoids flaky migrate/sync after cold start). +wait-ready: + @echo "Waiting for PostgreSQL…" + @i=1; while [ $$i -le 60 ]; do \ + docker-compose -f docker/docker-compose.yml exec -T postgres pg_isready -U events_user -d events_db >/dev/null 2>&1 && exit 0; \ + i=$$((i+1)); sleep 1; \ + done; \ + echo "PostgreSQL did not become ready in time."; exit 1 + +# Same image as Dockerfile ARG (AWS Public ECR mirror of official php — often works when Docker Hub CDN times out). 
+PHP_BASE_IMAGE ?= public.ecr.aws/docker/library/php:8.2-fpm-alpine + +# Pull base image before compose build — avoids BuildKit "DeadlineExceeded" on flaky Docker Hub CDN. +pull-base: + @echo "Pulling $(PHP_BASE_IMAGE) (up to 6 attempts)…" + @attempt=1; \ + while [ $$attempt -le 6 ]; do \ + docker pull $(PHP_BASE_IMAGE) && exit 0; \ + echo "Pull failed (attempt $$attempt/6). Next retry in 25s…"; \ + echo "Tip: try PHP_BASE_IMAGE=mirror.gcr.io/library/php:8.2-fpm-alpine make pull-base"; \ + attempt=$$((attempt+1)); \ + sleep 25; \ + done; \ + echo "Could not pull base image. Set PHP_BASE_IMAGE in .env (see .env.example) or fix network/VPN/DNS."; \ + exit 1 install: @if [ ! -f .env ]; then cp .env.example .env; fi composer install - docker-compose -f docker/docker-compose.yml up -d - @sleep 15 + @$(MAKE) pull-base + BUILDKIT_PULL_POLICY=if-not-present docker-compose -f docker/docker-compose.yml up -d + @$(MAKE) wait-ready @$(MAKE) migrate @$(MAKE) sync - @echo "✅ Setup complete! API available at http://localhost:8000" + @echo "✅ Setup complete! API at http://localhost:8000" + +ready: install test-all + @echo "✅ Ready: stack is up, migrations applied, sync done, all tests passed." 
test: - ./vendor/bin/phpunit tests/Unit --testdox + docker-compose -f docker/docker-compose.yml exec app ./vendor/bin/phpunit tests/Unit --testdox + +test-all: + docker-compose -f docker/docker-compose.yml exec app ./vendor/bin/phpunit tests --testdox run: - docker-compose -f docker/docker-compose.yml up -d + BUILDKIT_PULL_POLICY=if-not-present docker-compose -f docker/docker-compose.yml up -d @echo "API: http://localhost:8000" sync: @@ -31,8 +71,13 @@ migrate: @docker-compose -f docker/docker-compose.yml exec -T postgres psql -U events_user -d events_db < database/migrations/002_zones_available_seats.sql 2>/dev/null || true test-integration: - docker-compose -f docker/docker-compose.yml exec app ./vendor/bin/phpunit tests/Integration/Database --testdox + docker-compose -f docker/docker-compose.yml exec app ./vendor/bin/phpunit tests/Integration --testdox + +enqueue-plan-sync: + docker-compose -f docker/docker-compose.yml exec app php scripts/enqueue-plan-sync.php +consume-sync: + docker-compose -f docker/docker-compose.yml exec app php scripts/rabbitmq-consume-plan-sync.php stop: docker-compose -f docker/docker-compose.yml down diff --git a/README.md b/README.md index 8bf12d1..8eb1fe1 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,135 @@ # TicketBridge -TicketBridge is a small **events microservice** that ingests plans from a **configurable external provider** (XML over HTTP), stores them for historical lookup, and exposes a **date-range search** API in JSON. +**TicketBridge** is a production-minded **events microservice** in PHP: it ingests event plans from a **pluggable provider** (XML over HTTP or local fixtures), persists them in **PostgreSQL**, exposes a fast **date-range search** API, supports **seat reservations** without overselling, and can run plan sync **synchronously or via RabbitMQ**—all behind **DDD-style boundaries** and **explicit ports**. 
-The external provider is treated as an unreliable dependency: sync uses retries, timeouts, and a circuit breaker; search stays fast via caching and reads from the local database so it does not depend on the provider being up. +It is designed to read like a **small but serious** system: clear separation of concerns, defensive handling of an unreliable upstream, and operations-friendly defaults (Docker Compose, Make targets, documented env vars). -## What it does +--- + +## Why this repository stands out + +| Theme | What you will find here | +|--------|-------------------------| +| **Architecture** | Domain / Application / Infrastructure; **ports** for provider, parser, cache invalidation, and job publishing—**not** a ball of Guzzle+SQL in controllers. | +| **Data integrity** | Full sync path in **one DB transaction**; search cache invalidated **only after commit**. | +| **Concurrency** | **`SELECT … FOR UPDATE`** on `zones` when reserving seats; documented limits (single DB vs sharding). | +| **Performance** | **`/search` = one SQL round-trip** (joins + aggregates), optional **Redis** with **epoch-based** invalidation and **single-flight** lock against stampedes. | +| **Resilience** | Provider calls: **timeouts**, **retries with backoff**, **circuit breaker** (state in Redis)—**isolated from the read path** so search stays up when the feed is down. | +| **Async** | **RabbitMQ**: durable messages, topic exchange, **DLQ** for poison/failed jobs, worker service in Compose. | +| **Developer experience** | **Offline sync** via `PROVIDER_FIXTURE_PATH`; after clone, **`make ready`** brings the stack up, migrates, syncs, and runs the **full test suite** in Docker. 
| + +--- + +## Table of contents + +- [Stack](#stack) +- [Quick start](#quick-start) +- [Commands](#commands) +- [API](#api) +- [Architecture](#architecture) +- [Technical deep dives](#technical-deep-dives) +- [Configuration](#configuration) +- [Testing](#testing) +- [Repository layout](#repository-layout) +- [Extending to another provider](#extending-to-another-provider) +- [Branding & history](#branding--history) +- [Production & security notes](#production--security-notes) +- [Scaling ideas](#scaling-ideas) + +--- + +## Stack + +| Layer | Technology | +|--------|------------| +| Runtime | PHP 8.2+ | +| HTTP | Built-in server in dev (`public/index.php`), Symfony HttpFoundation + Routing | +| ORM / DB | Doctrine ORM + DBAL, PostgreSQL 15 | +| Cache / CB state | Redis 7 (Predis) | +| Messaging | RabbitMQ 3 (php-amqplib), management UI on `:15672` | +| Containers | Docker Compose (`docker/docker-compose.yml`) | + +--- + +## Quick start + +**Prerequisites:** **Git**, **Docker** (with Compose), **Make**, **Composer**, and **PHP 8.2+** on the host (Composer installs `vendor/` into the project; containers mount that tree). + +**Recommended first run (one command after clone):** + +```bash +git clone <repository-url> TicketBridge && cd TicketBridge +make ready +``` + +`make ready` runs **`make install`** then **`make test-all`**. It copies `.env` from `.env.example` if missing, runs `composer install`, starts the stack, **waits until PostgreSQL accepts connections** (no blind `sleep`), applies schema + migrations, runs an initial sync, then executes **all PHPUnit tests inside the `app` container** (unit + integration, including API smoke against the running server). + +**Alternative (two commands):**
-- **Search** (`GET /search`) accepts `starts_at` and `ends_at` and returns events overlapping that range, with aggregated min/max prices. -- **Configuration**: all external URLs and infrastructure hosts come from environment variables (see [Installation & Setup](#installation--setup)). There are no hardcoded provider endpoints in the application. +```bash +make install +make test-all +``` + +- **`make install`** — same as above **without** tests. +- Default **`PROVIDER_FIXTURE_PATH`** points at [`tests/Fixtures/provider_responses/response_1.xml`](tests/Fixtures/provider_responses/response_1.xml) so the first sync is **offline** and deterministic. + +Smoke test: + +```bash +curl -s http://localhost:8000/health +curl -s "http://localhost:8000/search?starts_at=2021-06-30&ends_at=2021-07-31" +``` + +**HTTP provider mode:** clear `PROVIDER_FIXTURE_PATH` in `.env` and set **`PROVIDER_URL`** to a real feed URL. + +**RabbitMQ UI:** [http://localhost:15672](http://localhost:15672) — default credentials match `.env.example` (`RABBITMQ_USER` / `RABBITMQ_PASS`, typically `events_user` / `events_pass`). + +**Troubleshooting** + +- **Makefile** uses the `docker-compose` CLI. If you only have Compose V2, install the [Compose plugin](https://docs.docker.com/compose/install/) or symlink `docker-compose` to `docker compose`. +- **First `docker compose` build** can sit on “load metadata” or “Building … (2/3)” for **several minutes**: BuildKit is waiting on **Docker Hub** for `php:8.2-fpm-alpine`. That is not an infinite loop—the timer keeps climbing until Hub responds. **Do not assume it is frozen** until ~3–5 minutes have passed on a slow network. +- **If you cancelled (Ctrl+C, exit 130)** or want to prefetch: run **`make pull-base`** (same as `docker pull php:8.2-fpm-alpine`), then `make ready` again. +- **`.env` warnings** (`PROVIDER_URL` / `PROVIDER_FIXTURE_PATH` empty): Compose is fine; for offline sync, keep `PROVIDER_FIXTURE_PATH` set in `.env` (see `.env.example`). 
Empty `PROVIDER_FIXTURE_PATH` forces HTTP mode and requires `PROVIDER_URL`. +- **Plain build logs (less TTY spam):** + `DOCKER_BUILDKIT=0 docker-compose -f docker/docker-compose.yml build` +- **Compose “bake” quirks:** try `COMPOSE_BAKE=false docker-compose -f docker/docker-compose.yml up -d` if your Docker version misbehaves during parallel service builds. +- **Slow first build (>5 min):** Compose uses Dockerfile **`target: dev`** — only PHP + extensions + Composer binary. Dependencies come from **`composer install` on the host** + bind mount, so the image no longer runs a full `composer install` or `COPY` of the whole tree during dev builds. A **production** image (all code inside the image; run from the repository root): + `docker build -f docker/Dockerfile --target prod -t ticketbridge:prod .` +- **`DeadlineExceeded` / `failed to copy` / pull timeouts:** Often Docker Hub’s CDN (the `cloudflarestorage.com` URLs) is slow or flaky. The Dockerfile **defaults to the official PHP image via AWS Public ECR**: `public.ecr.aws/docker/library/php:8.2-fpm-alpine` (same content, different host). **`make pull-base`** pulls that image before `compose up`. Override in `.env`: `PHP_BASE_IMAGE=mirror.gcr.io/library/php:8.2-fpm-alpine` or `php:8.2-fpm-alpine` if one mirror works better on your network.
+ +--- + +## Commands + +| Goal | Command | +|------|---------| +| **First-time / CI verification (install + all tests)** | `make ready` | +| Setup only (deps + stack + migrate + sync) | `make install` | +| **All tests** (PHPUnit `tests/`, inside `app` container) | `make test-all` | +| Start stack only | `make run` | +| Apply `schema.sql` + migration `002` (Postgres in Compose) | `make migrate` | +| Sync plans **inline** (no queue) | `make sync` | +| Publish one **async** sync job | `make enqueue-plan-sync` | +| Run sync **consumer** in foreground (same as debugging worker) | `make consume-sync` | +| Unit tests only | `make test` | +| Integration tests only (`tests/Integration`) | `make test-integration` | +| Stop stack | `make stop` | +| Remove volumes (destructive) | `make clean` | + +--- -## API contract +## API -**GET /search?starts_at=2021-06-01&ends_at=2021-07-31** +| Method | Path | Purpose | +|--------|------|---------| +| `GET` | `/health` | Liveness | +| `GET` | `/search?starts_at=YYYY-MM-DD&ends_at=YYYY-MM-DD` | Events overlapping range; min/max price per plan | +| `POST` | `/zones/{zoneUuid}/reserve` | Body `{"quantity": n}` — decrements `available_seats` under row lock | +| `POST` | `/jobs/plan-sync` | Enqueues async sync (**202** + `correlation_id`, **503** if broker down) | -Returns events in JSON following the service’s documented response shape (date fields, times, price bounds). Keep request/response fields aligned with your own OpenAPI document if you publish one alongside this repo. +**Search response** (shape illustrative): -**Example response** (illustrative; event id is assigned by this service): ```json { "events": [ @@ -34,177 +147,189 @@ Returns events in JSON following the service’s documented response shape (date } ``` -Sample provider XML used in tests is documented under [`tests/Fixtures/provider_responses/README.md`](tests/Fixtures/provider_responses/README.md). 
+Fixture XML formats: [`tests/Fixtures/provider_responses/README.md`](tests/Fixtures/provider_responses/README.md). --- -## Installation & Setup +## Architecture -### Prerequisites +**Dependency rule:** Domain knows nothing about HTTP, Redis, RabbitMQ, or Doctrine. Application orchestrates **use cases** and depends on **ports**; Infrastructure supplies **adapters** and **persistence**. + +```mermaid +flowchart TB + subgraph HTTP + C[Controllers] + end + subgraph Application + S[SearchEventsService / SyncPlansService / ReserveZoneCapacityService] + P[Ports: provider, parser, mapper, cache invalidation, plan-sync jobs] + end + subgraph Domain + E[Entities & value objects] + RI[Repository & persistence ports] + end + subgraph Infrastructure + DO[Doctrine repository] + RD[(Redis)] + EXT[HTTP / XML adapters] + MQ[(RabbitMQ)] + end + PG[(PostgreSQL)] + C --> S + S --> RI + S --> P + DO -.implements.- RI + EXT -.implements.- P + S --> DO + DO --> PG + S --> RD + EXT --> RD + C -->|POST /jobs/plan-sync| MQ + MQ -->|worker-plan-sync| S +``` -- Docker and Docker Compose -- Make +**Batch writes (sync):** `saveBasePlans`, `savePlans`, and `saveZones` resolve existing rows in bulk and persist in batches to avoid **O(N)** round-trips per entity. -Copy the environment template. By default **`PROVIDER_FIXTURE_PATH`** points at [`tests/Fixtures/provider_responses/response_1.xml`](tests/Fixtures/provider_responses/response_1.xml) so sync runs **offline** with the same XML used in tests. Switch to HTTP by **clearing `PROVIDER_FIXTURE_PATH`** and setting **`PROVIDER_URL`**. You can point the path at `response_2.xml` / `response_3.xml` to simulate other snapshots. +--- -```bash -cp .env.example .env -# Optional: HTTP mode — PROVIDER_FIXTURE_PATH= and PROVIDER_URL=https://... 
-``` +## Technical deep dives -### Quick Start (Single Command) +### Read path: `/search` without N+1 -```bash -make install -``` +`DoctrinePlanRepository::findByDateRange()` issues **a single SQL** statement: `plans` ⋈ `base_plans` ⋈ `zones` with **`MIN`/`MAX` price** per plan, overlap filter on dates, `sell_mode = 'online'`, `is_active = true`, and a correct **`GROUP BY`**. No per-plan follow-up queries—only mapping to the API shape. -This single command will: -- Install PHP dependencies via Composer -- Build and start Docker services (PostgreSQL, Redis, RabbitMQ) -- Create database schema -- Synchronize initial data from the provider +Indexes in [`database/schema.sql`](database/schema.sql) match that access pattern, notably: -**That's it!** The API is ready at `http://localhost:8000` +- `idx_plans_date_range` on `(plan_start_date, plan_end_date)` **where `is_active = true`** +- `idx_zones_price` on `(plan_id, price)` -### Verify Installation +### Cache: epoch invalidation + stampede control -Test the API: -```bash -# Health check -curl http://localhost:8000/health +- **Invalidation:** `INCR search:epoch` after a **successful** sync commit—no `KEYS` scan, no blocking wide deletes. +- **Keys:** `search:v{epoch}:{from}:{to}` with TTL from **`CACHE_SEARCH_TTL`**. +- **Stampede:** on miss, a short-lived **`SET NX EX`** lock per logical key; other waiters poll **`GET`** before optionally hitting the DB once. +- **Degradation:** if Redis is down, search still reads PostgreSQL. -# Search endpoint -curl "http://localhost:8000/search?starts_at=2021-06-30&ends_at=2021-07-31" -``` +### Write path: transactional sync -Run tests: -```bash -make test -``` +`PlansPersistencePort::runSyncTransaction` wraps base plans, plans, zones, and inactive marking in **one transaction**. **`CacheInvalidationPort`** runs **after** commit so failed syncs never leave search pointing at half-written data. 
-## Commands +### Inventory & overselling -- `make install` - Complete setup (installs, starts services, syncs data) -- `make sync` - Sync plans from provider (update data) -- `make test` - Run tests -- `make stop` - Stop services +- Column **`zones.available_seats`** (with migration for existing DBs). +- **`POST /zones/.../reserve`:** transaction + **`SELECT … FOR UPDATE`** on the zone row, check capacity, update counter; **`422`** if insufficient. +- **Guarantee:** on **one PostgreSQL**, two concurrent reservers for the same zone cannot both succeed past the check. **Not** a cross-shard story—documented as a known boundary. -## Architecture +### Provider resilience (sync only) -The solution follows **Domain-Driven Design (DDD)** principles with a clean architecture: +Configurable **`EVENT_PROVIDER_*`** timeouts, retries, and **circuit breaker** thresholds (`EVENT_PROVIDER_CB_*`), implemented around the HTTP client used **only** for ingestion—not for `/search`. -- **Domain**: Business logic (Entities, Value Objects, Repository Interfaces) -- **Application**: Use cases (Services, DTOs, Mappers) -- **Infrastructure**: Technical layer (Doctrine, HTTP, External APIs, Redis) +### RabbitMQ: async plan sync -### Key Design Decisions +Decouples **enqueue** from **execute**; durable queue, **manual ack**, **`nack` without requeue** on poison/terminal failure → **DLQ** for inspection. -1. **DDD with Pragmatic Approach**: Domain entities are pure business objects, separated from persistence concerns. Doctrine entities handle database mapping. +| Artifact | Name | +|----------|------| +| Exchange (topic) | `ticketbridge` | +| Routing key | `sync.plans` | +| Queue | `ticketbridge.sync.plans` | +| DLX | `ticketbridge.dlx` | +| DLQ | `ticketbridge.sync.plans.dlq` | -2. 
**Batch Processing**: All database saves use batch operations to minimize queries: - - `saveBasePlans()`: Single query to fetch existing, then batch persist - - `savePlans()`: Single query to fetch existing, then batch persist - - `saveZones()`: Single query to fetch existing zones, then batch persist - - **Result**: Reduced from O(N*M) queries to O(1) queries per batch +Port: **`PlanSyncJobPublisherPort`** — implementation: **`AmqpPlanSyncJobPublisher`**. Consumer: **`scripts/rabbitmq-consume-plan-sync.php`**; Compose service **`worker-plan-sync`**. **Inline** sync remains **`make sync`** for CI and debugging. -3. **Optimized Queries**: - - `findByDateRange()` uses raw SQL with JOINs and aggregations (MIN/MAX) to fetch all data in a single query - - Eliminated N+1 queries by constructing Domain objects directly from SQL results +```mermaid +sequenceDiagram + participant Client + participant API + participant Broker as RabbitMQ + participant Worker + participant DB as PostgreSQL + Client->>API: POST /jobs/plan-sync + API->>Broker: publish sync_plans (persistent) + API-->>Client: 202 correlation_id + Worker->>Broker: consume (prefetch 1) + Worker->>DB: transactional SyncPlansService + Worker->>Broker: ack (or nack to DLQ) +``` -4. **Caching Strategy**: - - Redis cache for `/search` endpoint results (60s TTL) - - Cache invalidation on sync - - Graceful degradation if Redis is unavailable +--- + +## Configuration -5. **Resilience**: - - Retry logic with exponential backoff for external API calls - - Timeout configuration for HTTP requests - - Error handling that doesn't break the application +Copy [`.env.example`](.env.example) to `.env`. Highlights: -6. **Sync use case & ports (multi-provider)**: - - `SyncPlansService` depends only on **port interfaces** (`App\Application\Port\*`, `App\Domain\Port\PlansPersistencePort`), not on concrete HTTP/XML/Redis types. 
- - Default wiring uses **adapters** in `App\Infrastructure\Adapter\Sync\` around the existing HTTP client, XML parser, domain mapper, and Redis cache invalidation. - - **Adding another provider** (same domain model): implement `PlansProviderPort` and `PlansPayloadParserPort` (and a mapper port if the normalized row shape differs); register in `scripts/sync-plans.php` or a factory (e.g. `SYNC_PROVIDER=acme`). - - **Different product rules** (webhooks, extra steps): prefer new application services or narrower ports (interface segregation) instead of one oversized port. +| Area | Variables (examples) | +|------|----------------------| +| Database | `DB_HOST`, `DB_PORT`, `DB_DATABASE`, `DB_USERNAME`, `DB_PASSWORD` | +| Redis | `REDIS_HOST`, `REDIS_PORT` | +| RabbitMQ | `RABBITMQ_HOST`, `RABBITMQ_PORT`, `RABBITMQ_USER`, `RABBITMQ_PASS`, optional `RABBITMQ_VHOST`, queue/exchange overrides | +| Provider | `PROVIDER_FIXTURE_PATH` and/or `PROVIDER_URL` | +| HTTP / CB (sync) | `EVENT_PROVIDER_TIMEOUT`, `EVENT_PROVIDER_CONNECT_TIMEOUT`, `EVENT_PROVIDER_RETRY_*`, `EVENT_PROVIDER_CB_*` | +| Search cache | `CACHE_SEARCH_TTL` | + +--- -### Sync transaction and seat inventory (phase 2) +## Testing -- **Single DB transaction** for the full sync write path (`PlansPersistencePort::runSyncTransaction`): base plans, plans, zones, and inactive marking commit together or roll back together. **Search cache** is invalidated only **after** a successful commit. -- **`zones.available_seats`**: on new zones equals `capacity`; when the provider updates `capacity` on an existing zone, **sold seats** (`capacity - available_seats`) are preserved and the new `available_seats` is recomputed (never below zero). -- **Reservations**: `POST /zones/{zoneUuid}/reserve` with body `{"quantity":1}` runs `SELECT … FOR UPDATE` on the zone row, checks `available_seats`, then decrements in the same transaction. **Insufficient capacity** returns HTTP 422 with a clear message. 
-- **Concurrency**: locking is **per zone row** in PostgreSQL. Two concurrent requests for the same zone cannot both pass the availability check; one waits on the lock, then sees the updated `available_seats`. This does not by itself coordinate **sharded** databases; for very high contention you would add partitioning by event, queues, or optimistic versioning. -- **Existing databases**: run `database/migrations/002_zones_available_seats.sql` (also invoked by `make migrate` after `schema.sql`). +```bash +make test-all # recommended: full suite inside Docker (matches CI expectations) +make test # unit only +make test-integration +``` -## Performance Optimizations +Tests run **inside the `app` container** so you do not need PHP extensions on the host beyond what Composer needs. The stack must be up (`make install` or `make run`). -### Database -- **Batch saves**: All inserts/updates use batch operations -- **Optimized indexes**: - - `idx_plans_date_range` on `(plan_start_date, plan_end_date)` with `WHERE is_active = TRUE` - - `idx_zones_price` on `(plan_id, price)` for price aggregation -- **Single query for search**: JOINs and aggregations in one query +- **Unit:** XML parser, value objects, retry strategy, sync orchestration with **mocked ports**, file provider adapter. +- **Integration (DB):** zone reservation / capacity against PostgreSQL. +- **Integration (API):** smoke against `GET /search` on the container’s PHP built-in server (same container as PHPUnit). 
-### Caching -- Redis cache for search results (60s TTL) -- Cache key: `search:{starts_at}:{ends_at}` -- Automatic invalidation on data sync +--- -### Scalability Considerations +## Repository layout -**For thousands of plans with hundreds of zones:** -- Batch processing already implemented (handles large datasets efficiently) -- Database indexes optimized for date range queries -- Soft delete pattern (`is_active` flag) instead of hard deletes +| Path | Role | +|------|------| +| `src/events/Domain/` | Entities, VOs, repository contracts, domain ports & exceptions | +| `src/events/Application/` | Services, mappers, application ports | +| `src/events/Infrastructure/` | Doctrine, HTTP, XML, Redis, RabbitMQ, adapters, HTTP controllers | +| `public/index.php` | HTTP bootstrap (wiring is explicit—no hidden DI container) | +| `scripts/` | `sync-plans.php`, RabbitMQ consumer & enqueue CLI | +| `src/events/Infrastructure/Bootstrap/` | `SyncPlansServiceFactory` — shared wiring for CLI sync and the worker | +| `database/` | `schema.sql`, migrations (e.g. `available_seats`) | +| `tests/Fixtures/provider_responses/` | Neutral sample XML used by tests and offline sync | +| `docker/` | `Dockerfile`, `docker-compose.yml` | -**For 5k-10k requests per second:** -- Redis caching reduces database load by ~90% (cache hit rate) -- Single optimized SQL query for cache misses -- Connection pooling via Doctrine -- Horizontal scaling ready (stateless API, shared Redis/PostgreSQL) +--- -**Future enhancements:** -- Read replicas for PostgreSQL -- Redis cluster for distributed caching -- Message queue (RabbitMQ) for async sync operations -- API rate limiting -- Response compression +## Extending to another provider -## Testing +1. **Access** — Implement **`PlansProviderPort`** (HTTP, file, queue, etc.). +2. **Format** — If not the current XML, implement **`PlansPayloadParserPort`** (and/or an extra mapper port) so the domain still receives the same normalized structure. +3. 
**Wire** — Register adapters in **`SyncPlansServiceFactory`** so CLI sync, the RabbitMQ worker, and any future entry points share one composition root. -Run unit tests: -```bash -make test -``` +Prefer **new services or narrower ports** over inflating a single “do everything” interface. -Integration tests (PostgreSQL via Docker `app` container; requires migrated schema with `available_seats`): -```bash -make test-integration -``` +--- -Tests cover: -- XML parsing (PlanListParser) -- Domain value objects (Price) -- Sync orchestration with mocked ports (`SyncPlansServiceTest`) -- Local XML provider fixture (`FilePlansProviderAdapterTest`) -- Zone reservation / oversell protection (`tests/Integration/Database/ZoneReservationIntegrationTest`) +## Branding & history -## Development +- **Neutral sample data** (“Acme”-style); no third-party brand in code paths. +- **Configuration** (`PROVIDER_URL`, `PROVIDER_FIXTURE_PATH`) is the integration boundary. +- Evolved from a prior vendor-specific exercise into a **generic** ingestion + search + reservation service; treat any stray legacy naming as cosmetic cleanup. -### Sync data from provider +--- -To update data from the external provider: +## Production & security notes -```bash -make sync -``` +This repo is **interview- and demo-oriented**, not a hardened production drop-in: -This command fetches the latest plans from the provider API, updates the database, and invalidates the cache. +- **`POST /jobs/plan-sync`** has **no authentication**—put it behind auth, network policy, or remove the route if unused. +- Add **TLS**, **secrets management**, **structured logging**, **metrics**, and **rate limiting** for real traffic. +- **Rebuild images** after dependency changes; bind mounts use the host `vendor/` in dev. -**Note:** The sync can be scheduled via cron for automatic synchronization. 
+--- -### Database access +## Scaling ideas -Connect to PostgreSQL: -```bash -docker-compose -f docker/docker-compose.yml exec postgres psql -U events_user -d events_db -``` +Read replicas, Redis Cluster, additional job types on the same broker, horizontal **worker** replicas (watch DB contention on sync), API rate limits, compression—the current design keeps **extension points** (ports, workers, indexes) explicit so these can be added without rewriting the core model. diff --git a/composer.json b/composer.json index 9ecc359..000cd47 100644 --- a/composer.json +++ b/composer.json @@ -16,7 +16,8 @@ "symfony/routing": "^6.4", "symfony/cache": "^6.4", "guzzlehttp/guzzle": "^7.5", - "predis/predis": "^2.0" + "predis/predis": "^2.0", + "php-amqplib/php-amqplib": "^3.0" }, "require-dev": { "phpunit/phpunit": "^10.0" diff --git a/composer.lock b/composer.lock index 564a1a8..5251e47 100644 --- a/composer.lock +++ b/composer.lock @@ -4,7 +4,7 @@ "Read more about it at https://getcomposer.org/doc/01-basic-usage.md#installing-dependencies", "This file is @generated automatically" ], - "content-hash": "0aaa3c251a1dc4ca9d01198e5da68fcf", + "content-hash": "e3ab6ded2ebc069eff9c92875322386e", "packages": [ { "name": "brick/math", @@ -1349,6 +1349,316 @@ ], "time": "2025-08-23T21:21:41+00:00" }, + { + "name": "paragonie/constant_time_encoding", + "version": "v3.1.3", + "source": { + "type": "git", + "url": "https://github.com/paragonie/constant_time_encoding.git", + "reference": "d5b01a39b3415c2cd581d3bd3a3575c1ebbd8e77" + }, + "dist": { + "type": "zip", + "url": "https://api.github.com/repos/paragonie/constant_time_encoding/zipball/d5b01a39b3415c2cd581d3bd3a3575c1ebbd8e77", + "reference": "d5b01a39b3415c2cd581d3bd3a3575c1ebbd8e77", + "shasum": "" + }, + "require": { + "php": "^8" + }, + "require-dev": { + "infection/infection": "^0", + "nikic/php-fuzzer": "^0", + "phpunit/phpunit": "^9|^10|^11", + "vimeo/psalm": "^4|^5|^6" + }, + "type": "library", + "autoload": { + "psr-4": { + 
"ParagonIE\\ConstantTime\\": "src/" + } + }, + "notification-url": "https://packagist.org/downloads/", + "license": [ + "MIT" + ], + "authors": [ + { + "name": "Paragon Initiative Enterprises", + "email": "security@paragonie.com", + "homepage": "https://paragonie.com", + "role": "Maintainer" + }, + { + "name": "Steve 'Sc00bz' Thomas", + "email": "steve@tobtu.com", + "homepage": "https://www.tobtu.com", + "role": "Original Developer" + } + ], + "description": "Constant-time Implementations of RFC 4648 Encoding (Base-64, Base-32, Base-16)", + "keywords": [ + "base16", + "base32", + "base32_decode", + "base32_encode", + "base64", + "base64_decode", + "base64_encode", + "bin2hex", + "encoding", + "hex", + "hex2bin", + "rfc4648" + ], + "support": { + "email": "info@paragonie.com", + "issues": "https://github.com/paragonie/constant_time_encoding/issues", + "source": "https://github.com/paragonie/constant_time_encoding" + }, + "time": "2025-09-24T15:06:41+00:00" + }, + { + "name": "paragonie/random_compat", + "version": "v9.99.100", + "source": { + "type": "git", + "url": "https://github.com/paragonie/random_compat.git", + "reference": "996434e5492cb4c3edcb9168db6fbb1359ef965a" + }, + "dist": { + "type": "zip", + "url": "https://api.github.com/repos/paragonie/random_compat/zipball/996434e5492cb4c3edcb9168db6fbb1359ef965a", + "reference": "996434e5492cb4c3edcb9168db6fbb1359ef965a", + "shasum": "" + }, + "require": { + "php": ">= 7" + }, + "require-dev": { + "phpunit/phpunit": "4.*|5.*", + "vimeo/psalm": "^1" + }, + "suggest": { + "ext-libsodium": "Provides a modern crypto API that can be used to generate random bytes." 
+ }, + "type": "library", + "notification-url": "https://packagist.org/downloads/", + "license": [ + "MIT" + ], + "authors": [ + { + "name": "Paragon Initiative Enterprises", + "email": "security@paragonie.com", + "homepage": "https://paragonie.com" + } + ], + "description": "PHP 5.x polyfill for random_bytes() and random_int() from PHP 7", + "keywords": [ + "csprng", + "polyfill", + "pseudorandom", + "random" + ], + "support": { + "email": "info@paragonie.com", + "issues": "https://github.com/paragonie/random_compat/issues", + "source": "https://github.com/paragonie/random_compat" + }, + "time": "2020-10-15T08:29:30+00:00" + }, + { + "name": "php-amqplib/php-amqplib", + "version": "v3.7.4", + "source": { + "type": "git", + "url": "https://github.com/php-amqplib/php-amqplib.git", + "reference": "381b6f7c600e0e0c7463cdd7f7a1a3bc6268e5fd" + }, + "dist": { + "type": "zip", + "url": "https://api.github.com/repos/php-amqplib/php-amqplib/zipball/381b6f7c600e0e0c7463cdd7f7a1a3bc6268e5fd", + "reference": "381b6f7c600e0e0c7463cdd7f7a1a3bc6268e5fd", + "shasum": "" + }, + "require": { + "ext-mbstring": "*", + "ext-sockets": "*", + "php": "^7.2||^8.0", + "phpseclib/phpseclib": "^2.0|^3.0" + }, + "conflict": { + "php": "7.4.0 - 7.4.1" + }, + "replace": { + "videlalvaro/php-amqplib": "self.version" + }, + "require-dev": { + "ext-curl": "*", + "nategood/httpful": "^0.2.20", + "phpunit/phpunit": "^7.5|^9.5", + "squizlabs/php_codesniffer": "^3.6" + }, + "type": "library", + "extra": { + "branch-alias": { + "dev-master": "3.0-dev" + } + }, + "autoload": { + "psr-4": { + "PhpAmqpLib\\": "PhpAmqpLib/" + } + }, + "notification-url": "https://packagist.org/downloads/", + "license": [ + "LGPL-2.1-or-later" + ], + "authors": [ + { + "name": "Alvaro Videla", + "role": "Original Maintainer" + }, + { + "name": "Raúl Araya", + "email": "nubeiro@gmail.com", + "role": "Maintainer" + }, + { + "name": "Luke Bakken", + "email": "luke@bakken.io", + "role": "Maintainer" + }, + { + "name": "Ramūnas 
Dronga", + "email": "github@ramuno.lt", + "role": "Maintainer" + } + ], + "description": "Formerly videlalvaro/php-amqplib. This library is a pure PHP implementation of the AMQP protocol. It's been tested against RabbitMQ.", + "homepage": "https://github.com/php-amqplib/php-amqplib/", + "keywords": [ + "message", + "queue", + "rabbitmq" + ], + "support": { + "issues": "https://github.com/php-amqplib/php-amqplib/issues", + "source": "https://github.com/php-amqplib/php-amqplib/tree/v3.7.4" + }, + "time": "2025-11-23T17:00:56+00:00" + }, + { + "name": "phpseclib/phpseclib", + "version": "3.0.50", + "source": { + "type": "git", + "url": "https://github.com/phpseclib/phpseclib.git", + "reference": "aa6ad8321ed103dc3624fb600a25b66ebf78ec7b" + }, + "dist": { + "type": "zip", + "url": "https://api.github.com/repos/phpseclib/phpseclib/zipball/aa6ad8321ed103dc3624fb600a25b66ebf78ec7b", + "reference": "aa6ad8321ed103dc3624fb600a25b66ebf78ec7b", + "shasum": "" + }, + "require": { + "paragonie/constant_time_encoding": "^1|^2|^3", + "paragonie/random_compat": "^1.4|^2.0|^9.99.99", + "php": ">=5.6.1" + }, + "require-dev": { + "phpunit/phpunit": "*" + }, + "suggest": { + "ext-dom": "Install the DOM extension to load XML formatted public keys.", + "ext-gmp": "Install the GMP (GNU Multiple Precision) extension in order to speed up arbitrary precision integer arithmetic operations.", + "ext-libsodium": "SSH2/SFTP can make use of some algorithms provided by the libsodium-php extension.", + "ext-mcrypt": "Install the Mcrypt extension in order to speed up a few other cryptographic operations.", + "ext-openssl": "Install the OpenSSL extension in order to speed up a wide variety of cryptographic operations." 
+ }, + "type": "library", + "autoload": { + "files": [ + "phpseclib/bootstrap.php" + ], + "psr-4": { + "phpseclib3\\": "phpseclib/" + } + }, + "notification-url": "https://packagist.org/downloads/", + "license": [ + "MIT" + ], + "authors": [ + { + "name": "Jim Wigginton", + "email": "terrafrost@php.net", + "role": "Lead Developer" + }, + { + "name": "Patrick Monnerat", + "email": "pm@datasphere.ch", + "role": "Developer" + }, + { + "name": "Andreas Fischer", + "email": "bantu@phpbb.com", + "role": "Developer" + }, + { + "name": "Hans-Jürgen Petrich", + "email": "petrich@tronic-media.com", + "role": "Developer" + }, + { + "name": "Graham Campbell", + "email": "graham@alt-three.com", + "role": "Developer" + } + ], + "description": "PHP Secure Communications Library - Pure-PHP implementations of RSA, AES, SSH2, SFTP, X.509 etc.", + "homepage": "http://phpseclib.sourceforge.net", + "keywords": [ + "BigInteger", + "aes", + "asn.1", + "asn1", + "blowfish", + "crypto", + "cryptography", + "encryption", + "rsa", + "security", + "sftp", + "signature", + "signing", + "ssh", + "twofish", + "x.509", + "x509" + ], + "support": { + "issues": "https://github.com/phpseclib/phpseclib/issues", + "source": "https://github.com/phpseclib/phpseclib/tree/3.0.50" + }, + "funding": [ + { + "url": "https://github.com/terrafrost", + "type": "github" + }, + { + "url": "https://www.patreon.com/phpseclib", + "type": "patreon" + }, + { + "url": "https://tidelift.com/funding/github/packagist/phpseclib/phpseclib", + "type": "tidelift" + } + ], + "time": "2026-03-19T02:57:58+00:00" + }, { "name": "predis/predis", "version": "v2.4.1", diff --git a/docker/Dockerfile b/docker/Dockerfile index 077402f..7c87047 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,13 @@ -FROM php:8.2-fpm-alpine +# +# dev — PHP + extensions + Composer binary only. Code and vendor come from the host bind-mount +# (see docker-compose.yml). First build ~1–3 min (compile extensions); no composer install in image. 
+# prod — final stage: copies project and runs composer install --no-dev (for deployable images). +# +# Base image default: AWS Public ECR mirror of official php (same layers as Docker Hub, different CDN). +# If this fails, override e.g. PHP_BASE_IMAGE=mirror.gcr.io/library/php:8.2-fpm-alpine or php:8.2-fpm-alpine +ARG PHP_BASE_IMAGE=public.ecr.aws/docker/library/php:8.2-fpm-alpine +FROM ${PHP_BASE_IMAGE} AS base -# Install system dependencies RUN apk add --no-cache \ git \ curl \ @@ -12,27 +19,21 @@ RUN apk add --no-cache \ linux-headers \ $PHPIZE_DEPS -# Install PHP extensions RUN docker-php-ext-install pdo pdo_pgsql zip sockets -# Install Composer -COPY --from=composer:latest /usr/bin/composer /usr/bin/composer +RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer -# Set working directory +# --- Local development (default for docker-compose) --- +FROM base AS dev WORKDIR /var/www +EXPOSE 8000 +CMD ["php", "-S", "0.0.0.0:8000", "-t", "public"] -# Copy composer files +# --- Production-style image (no bind mount) --- +FROM base AS prod +WORKDIR /var/www COPY composer.json composer.lock* ./ - -# Install dependencies RUN composer install --no-dev --optimize-autoloader --no-interaction - -# Copy application code COPY . . - -# Expose port EXPOSE 8000 - -# Default command (can be overridden in docker-compose.yml) CMD ["php", "-S", "0.0.0.0:8000", "-t", "public"] - diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index e07443e..2a981d2 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -3,6 +3,10 @@ services: build: context: .. 
dockerfile: docker/Dockerfile + target: dev + pull: false + args: + PHP_BASE_IMAGE: ${PHP_BASE_IMAGE:-public.ecr.aws/docker/library/php:8.2-fpm-alpine} volumes: - ..:/var/www working_dir: /var/www @@ -24,10 +28,43 @@ services: - RABBITMQ_PORT=${RABBITMQ_PORT:-5672} - RABBITMQ_USER=${RABBITMQ_USER:-events_user} - RABBITMQ_PASS=${RABBITMQ_PASS:-events_pass} - - PROVIDER_URL=${PROVIDER_URL} - - PROVIDER_FIXTURE_PATH=${PROVIDER_FIXTURE_PATH} + - PROVIDER_URL=${PROVIDER_URL:-} + - PROVIDER_FIXTURE_PATH=${PROVIDER_FIXTURE_PATH:-tests/Fixtures/provider_responses/response_1.xml} command: php -S 0.0.0.0:8000 -t public + worker-plan-sync: + build: + context: .. + dockerfile: docker/Dockerfile + target: dev + pull: false + args: + PHP_BASE_IMAGE: ${PHP_BASE_IMAGE:-public.ecr.aws/docker/library/php:8.2-fpm-alpine} + volumes: + - ..:/var/www + working_dir: /var/www + depends_on: + - postgres + - redis + - rabbitmq + environment: + - DB_HOST=${DB_HOST:-postgres} + - DB_PORT=${DB_PORT:-5432} + - DB_DATABASE=${DB_DATABASE:-events_db} + - DB_USERNAME=${DB_USERNAME:-events_user} + - DB_PASSWORD=${DB_PASSWORD:-events_pass} + - REDIS_HOST=${REDIS_HOST:-redis} + - REDIS_PORT=${REDIS_PORT:-6379} + - RABBITMQ_HOST=${RABBITMQ_HOST:-rabbitmq} + - RABBITMQ_PORT=${RABBITMQ_PORT:-5672} + - RABBITMQ_USER=${RABBITMQ_USER:-events_user} + - RABBITMQ_PASS=${RABBITMQ_PASS:-events_pass} + - RABBITMQ_VHOST=${RABBITMQ_VHOST:-/} + - PROVIDER_URL=${PROVIDER_URL:-} + - PROVIDER_FIXTURE_PATH=${PROVIDER_FIXTURE_PATH:-tests/Fixtures/provider_responses/response_1.xml} + command: php scripts/rabbitmq-consume-plan-sync.php + restart: unless-stopped + postgres: image: postgres:15-alpine environment: diff --git a/public/index.php b/public/index.php index eb76294..8a90b91 100644 --- a/public/index.php +++ b/public/index.php @@ -9,11 +9,12 @@ use App\Domain\Exception\InsufficientCapacityException; use App\Domain\Exception\InvalidInputException; use App\Domain\Exception\RepositoryException; -use 
App\Infrastructure\Exception\ProviderException; use App\Infrastructure\Http\Controller\HealthController; +use App\Infrastructure\Http\Controller\PlanSyncJobsController; use App\Infrastructure\Http\Controller\ReservationsController; use App\Infrastructure\Http\Controller\SearchEventsController; use App\Infrastructure\Http\Router\RoutesLoader; +use App\Infrastructure\Messaging\RabbitMq\AmqpPlanSyncJobPublisher; use App\Infrastructure\Persistence\Cache\RedisCache; use App\Infrastructure\Persistence\Doctrine\Repository\DoctrinePlanRepository; use Doctrine\DBAL\Exception as DBALException; @@ -49,13 +50,20 @@ getenv('REDIS_HOST') ?: 'redis', (int)(getenv('REDIS_PORT') ?: 6379) ), - new DomainToApiMapper() + new DomainToApiMapper(), + (int)(getenv('CACHE_SEARCH_TTL') ?: 60) ); $searchController = new SearchEventsController($searchService); $healthController = new HealthController(); $reservationsController = new ReservationsController(new ReserveZoneCapacityService($repository)); -$routes = (new RoutesLoader())->load($searchController, $healthController, $reservationsController); +$planSyncJobsController = new PlanSyncJobsController(AmqpPlanSyncJobPublisher::fromEnvironment()); +$routes = (new RoutesLoader())->load( + $searchController, + $healthController, + $reservationsController, + $planSyncJobsController +); $request = Request::createFromGlobals(); $context = new RequestContext(); diff --git a/scripts/enqueue-plan-sync.php b/scripts/enqueue-plan-sync.php new file mode 100644 index 0000000..3868712 --- /dev/null +++ b/scripts/enqueue-plan-sync.php @@ -0,0 +1,18 @@ +publishPlanSyncJob(); + echo "Queued plan sync job. correlation_id={$id}\n"; +} catch (\Throwable $e) { + fwrite(STDERR, 'Failed to enqueue: ' . $e->getMessage() . 
"\n"); + exit(1); +} diff --git a/scripts/rabbitmq-consume-plan-sync.php b/scripts/rabbitmq-consume-plan-sync.php new file mode 100644 index 0000000..0c84026 --- /dev/null +++ b/scripts/rabbitmq-consume-plan-sync.php @@ -0,0 +1,45 @@ +getMessage())); + fwrite(STDERR, "Could not connect to database.\n"); + exit(1); +} + +$consumer = AmqpPlanSyncConsumer::fromEnvironment(); + +echo "[RabbitMQ] Plan sync consumer started; waiting for messages…\n"; + +try { + $consumer->consume(static function () use ($syncPlansService): void { + try { + $syncPlansService->sync(); + } catch (ProviderException $e) { + error_log('[RabbitMQ worker] Provider error: ' . $e->getMessage()); + throw $e; + } catch (DBALException $e) { + error_log('[RabbitMQ worker] Database error: ' . $e->getMessage()); + throw $e; + } + }); +} catch (\Throwable $e) { + error_log('[RabbitMQ worker] Consumer stopped: ' . $e->getMessage()); + fwrite(STDERR, $e->getMessage() . "\n"); + exit(1); +} diff --git a/scripts/sync-plans.php b/scripts/sync-plans.php index fd1561a..0e738fc 100644 --- a/scripts/sync-plans.php +++ b/scripts/sync-plans.php @@ -2,74 +2,20 @@ require __DIR__ . 
'/../vendor/autoload.php'; -use App\Application\Mapper\ProviderToDomainMapper; -use App\Application\Service\SyncPlansService; -use App\Infrastructure\Adapter\Sync\FilePlansProviderAdapter; -use App\Infrastructure\Adapter\Sync\HttpPlansProviderAdapter; -use App\Infrastructure\Adapter\Sync\ProviderToDomainMapperAdapter; -use App\Infrastructure\Adapter\Sync\RedisSearchCacheInvalidationAdapter; -use App\Infrastructure\Adapter\Sync\XmlPlansPayloadParserAdapter; +use App\Infrastructure\Bootstrap\SyncPlansServiceFactory; use App\Infrastructure\Exception\ProviderException; -use App\Infrastructure\External\Provider\EventProviderClient; -use App\Infrastructure\External\Xml\PlanListParser; -use App\Infrastructure\Persistence\Cache\RedisCache; -use App\Infrastructure\Persistence\Doctrine\Repository\DoctrinePlanRepository; -use Doctrine\ORM\EntityManager; -use Doctrine\ORM\ORMSetup; -use Doctrine\DBAL\DriverManager; use Doctrine\DBAL\Exception as DBALException; -use Doctrine\DBAL\Types\Type; -use Ramsey\Uuid\Doctrine\UuidType; -$doctrineConfig = require __DIR__ . '/../config/doctrine.php'; - -if (!Type::hasType('uuid')) { - Type::addType('uuid', UuidType::class); -} - -$config = ORMSetup::createAttributeMetadataConfiguration( - paths: [$doctrineConfig['mappings']['dir']], - isDevMode: true -); +$projectRoot = dirname(__DIR__); try { - $connection = DriverManager::getConnection($doctrineConfig['connection'], $config); - $connection->getDatabasePlatform()->registerDoctrineTypeMapping('uuid', 'uuid'); - $connection->connect(); + $syncPlansService = SyncPlansServiceFactory::create($projectRoot); } catch (DBALException $e) { error_log(sprintf('[Sync] Database connection failed: %s - %s', $e->getMessage(), $e->getTraceAsString())); echo "Error: Could not connect to database. 
Verify that PostgreSQL is running.\n"; exit(1); } -$entityManager = new EntityManager($connection, $config); -$repository = new DoctrinePlanRepository($entityManager); - -$redisCache = new RedisCache( - getenv('REDIS_HOST') ?: 'redis', - (int)(getenv('REDIS_PORT') ?: 6379) -); - -$projectRoot = dirname(__DIR__); -$fixtureRaw = getenv('PROVIDER_FIXTURE_PATH'); -$fixturePath = is_string($fixtureRaw) ? trim($fixtureRaw) : ''; -if ($fixturePath !== '') { - $absoluteFixture = str_starts_with($fixturePath, '/') - ? $fixturePath - : $projectRoot . '/' . $fixturePath; - $plansProvider = new FilePlansProviderAdapter($absoluteFixture); -} else { - $plansProvider = new HttpPlansProviderAdapter(new EventProviderClient($redisCache)); -} - -$syncPlansService = new SyncPlansService( - $plansProvider, - new XmlPlansPayloadParserAdapter(new PlanListParser()), - new ProviderToDomainMapperAdapter(new ProviderToDomainMapper()), - $repository, - new RedisSearchCacheInvalidationAdapter($redisCache) -); - try { echo "Syncing plans...\n"; $syncPlansService->sync(); diff --git a/src/events/Application/Port/PlanSyncJobPublisherPort.php b/src/events/Application/Port/PlanSyncJobPublisherPort.php new file mode 100644 index 0000000..8c6b474 --- /dev/null +++ b/src/events/Application/Port/PlanSyncJobPublisherPort.php @@ -0,0 +1,14 @@ +cache->setIfNotExists($lockKey, '1', self::FILL_LOCK_TTL_SECONDS)) { + try { + return $this->loadFromDatabaseAndCache($cacheKey, $startsAt, $endsAt); + } finally { + $this->cache->delete($lockKey); + } + } + + for ($i = 0; $i < self::STAMPEDE_MAX_ATTEMPTS; $i++) { + usleep(self::STAMPEDE_WAIT_US); + $cached = $this->cache->get($cacheKey); + if ($cached !== null) { + return json_decode($cached, true); + } + } + + return $this->loadFromDatabaseAndCache($cacheKey, $startsAt, $endsAt); + } + + private function loadFromDatabaseAndCache(string $cacheKey, DateTime $startsAt, DateTime $endsAt): array + { $plans = $this->repository->findByDateRange($startsAt, $endsAt); 
$events = []; foreach ($plans as $planData) { - $eventSummary = $this->mapper->toEventSummary($planData); - $events[] = $eventSummary->toArray(); + $events[] = $this->mapper->toEventSummary($planData)->toArray(); } - $this->cache->set($cacheKey, json_encode($events), 60); + $this->cache->set($cacheKey, json_encode($events), $this->cacheTtlSeconds); return $events; } private function generateCacheKey(DateTime $startsAt, DateTime $endsAt): string { - return sprintf('search:%s:%s', $startsAt->format('Y-m-d'), $endsAt->format('Y-m-d')); + $epoch = $this->cache->getSearchCacheEpoch(); + + return sprintf( + 'search:v%d:%s:%s', + $epoch, + $startsAt->format('Y-m-d'), + $endsAt->format('Y-m-d') + ); } } - diff --git a/src/events/Infrastructure/Adapter/Sync/RedisSearchCacheInvalidationAdapter.php b/src/events/Infrastructure/Adapter/Sync/RedisSearchCacheInvalidationAdapter.php index 3f407c4..7c0a624 100644 --- a/src/events/Infrastructure/Adapter/Sync/RedisSearchCacheInvalidationAdapter.php +++ b/src/events/Infrastructure/Adapter/Sync/RedisSearchCacheInvalidationAdapter.php @@ -14,6 +14,6 @@ public function __construct( public function invalidateSearchResults(): void { - $this->cache->delete('search:*'); + $this->cache->bumpSearchCacheEpoch(); } } diff --git a/src/events/Infrastructure/Bootstrap/SyncPlansServiceFactory.php b/src/events/Infrastructure/Bootstrap/SyncPlansServiceFactory.php new file mode 100644 index 0000000..251603b --- /dev/null +++ b/src/events/Infrastructure/Bootstrap/SyncPlansServiceFactory.php @@ -0,0 +1,75 @@ +getDatabasePlatform()->registerDoctrineTypeMapping('uuid', 'uuid'); + $connection->connect(); + + $entityManager = new EntityManager($connection, $config); + $repository = new DoctrinePlanRepository($entityManager); + + $redisCache = new RedisCache( + getenv('REDIS_HOST') ?: 'redis', + (int)(getenv('REDIS_PORT') ?: 6379) + ); + + $fixtureRaw = getenv('PROVIDER_FIXTURE_PATH'); + $fixturePath = is_string($fixtureRaw) ? 
trim($fixtureRaw) : ''; + if ($fixturePath !== '') { + $absoluteFixture = str_starts_with($fixturePath, '/') + ? $fixturePath + : $projectRoot . '/' . $fixturePath; + $plansProvider = new FilePlansProviderAdapter($absoluteFixture); + } else { + $plansProvider = new HttpPlansProviderAdapter(new EventProviderClient($redisCache)); + } + + return new SyncPlansService( + $plansProvider, + new XmlPlansPayloadParserAdapter(new PlanListParser()), + new ProviderToDomainMapperAdapter(new ProviderToDomainMapper()), + $repository, + new RedisSearchCacheInvalidationAdapter($redisCache) + ); + } +} diff --git a/src/events/Infrastructure/External/Provider/EventProviderClient.php b/src/events/Infrastructure/External/Provider/EventProviderClient.php index 69b4b2b..98313fc 100644 --- a/src/events/Infrastructure/External/Provider/EventProviderClient.php +++ b/src/events/Infrastructure/External/Provider/EventProviderClient.php @@ -11,11 +11,14 @@ class EventProviderClient { private Client $client; + private string $url; + private CircuitBreaker $circuitBreaker; + private RetryStrategy $retryStrategy; - public function __construct(?RedisCache $cache = null, int $timeout = 10, int $maxRetries = 3) + public function __construct(?RedisCache $cache = null, ?int $timeout = null, ?int $maxRetries = null) { $url = getenv('PROVIDER_URL'); if ($url === false || trim($url) === '') { @@ -24,29 +27,37 @@ public function __construct(?RedisCache $cache = null, int $timeout = 10, int $m ); } $this->url = $url; + + $timeoutSeconds = $timeout ?? (int)(getenv('EVENT_PROVIDER_TIMEOUT') ?: 10); + $connectTimeout = (int)(getenv('EVENT_PROVIDER_CONNECT_TIMEOUT') ?: 5); + $maxRetryAttempts = $maxRetries ?? 
(int)(getenv('EVENT_PROVIDER_RETRY_ATTEMPTS') ?: 3); + $retryBaseMs = (int)(getenv('EVENT_PROVIDER_RETRY_BASE_MS') ?: 500); + $this->client = new Client([ - 'timeout' => $timeout, - 'connect_timeout' => 5, + 'timeout' => $timeoutSeconds, + 'connect_timeout' => $connectTimeout, ]); - $this->circuitBreaker = new CircuitBreaker( - $cache ?? new RedisCache(getenv('REDIS_HOST') ?: 'redis', (int)(getenv('REDIS_PORT') ?: 6379)) - ); - $this->retryStrategy = new RetryStrategy($maxRetries); + + $redis = $cache ?? new RedisCache(getenv('REDIS_HOST') ?: 'redis', (int)(getenv('REDIS_PORT') ?: 6379)); + $failureThreshold = (int)(getenv('EVENT_PROVIDER_CB_FAILURES') ?: 5); + $openSeconds = (int)(getenv('EVENT_PROVIDER_CB_OPEN_SECONDS') ?: 60); + $this->circuitBreaker = new CircuitBreaker($redis, $failureThreshold, $openSeconds); + $this->retryStrategy = new RetryStrategy($maxRetryAttempts, $retryBaseMs); } public function fetchPlans(): string { try { - return $this->circuitBreaker->call(function() { - return $this->retryStrategy->execute(function() { + return $this->circuitBreaker->call(function () { + return $this->retryStrategy->execute(function () { try { $response = $this->client->get($this->url); $body = $response->getBody()->getContents(); - + if (empty($body)) { throw new ProviderException('Empty response from provider'); } - + return $body; } catch (GuzzleException $e) { error_log(sprintf('[Provider] Request failed: %s - URL: %s', $e->getMessage(), $this->url)); diff --git a/src/events/Infrastructure/External/Xml/PlanListParser.php b/src/events/Infrastructure/External/Xml/PlanListParser.php index 512d698..bc7dea5 100644 --- a/src/events/Infrastructure/External/Xml/PlanListParser.php +++ b/src/events/Infrastructure/External/Xml/PlanListParser.php @@ -8,7 +8,11 @@ class PlanListParser implements XmlParserInterface { public function parse(string $xml): array { - $xmlObj = simplexml_load_string($xml); + $xmlObj = simplexml_load_string( + $xml, + SimpleXMLElement::class, + 
LIBXML_NOERROR | LIBXML_NOWARNING + ); if ($xmlObj === false) { throw new \RuntimeException('Invalid XML format'); } diff --git a/src/events/Infrastructure/Http/Controller/PlanSyncJobsController.php b/src/events/Infrastructure/Http/Controller/PlanSyncJobsController.php new file mode 100644 index 0000000..8bd829b --- /dev/null +++ b/src/events/Infrastructure/Http/Controller/PlanSyncJobsController.php @@ -0,0 +1,38 @@ +jobPublisher->publishPlanSyncJob(); + } catch (\Throwable $e) { + error_log('[Jobs] Failed to publish plan sync: ' . $e->getMessage()); + + return new JsonResponse( + ['error' => 'Could not enqueue plan sync job', 'detail' => $e->getMessage()], + 503 + ); + } + + return new JsonResponse( + [ + 'status' => 'queued', + 'correlation_id' => $correlationId, + 'message' => 'A worker must be running (see docker-compose service worker-plan-sync or make consume-sync).', + ], + 202 + ); + } +} diff --git a/src/events/Infrastructure/Http/Router/RoutesLoader.php b/src/events/Infrastructure/Http/Router/RoutesLoader.php index 90f89ea..02acb81 100644 --- a/src/events/Infrastructure/Http/Router/RoutesLoader.php +++ b/src/events/Infrastructure/Http/Router/RoutesLoader.php @@ -3,6 +3,7 @@ namespace App\Infrastructure\Http\Router; use App\Infrastructure\Http\Controller\HealthController; +use App\Infrastructure\Http\Controller\PlanSyncJobsController; use App\Infrastructure\Http\Controller\ReservationsController; use App\Infrastructure\Http\Controller\SearchEventsController; use Symfony\Component\Routing\Route; @@ -13,7 +14,8 @@ class RoutesLoader public function load( SearchEventsController $searchController, HealthController $healthController, - ReservationsController $reservationsController + ReservationsController $reservationsController, + PlanSyncJobsController $planSyncJobsController ): RouteCollection { $routes = new RouteCollection(); @@ -29,6 +31,10 @@ public function load( '_controller' => [$reservationsController, 'reserveZone'], ], [], [], '', [], ['POST'])); 
+ $routes->add('enqueue_plan_sync', new Route('/jobs/plan-sync', [ + '_controller' => [$planSyncJobsController, 'enqueuePlanSync'], + ], [], [], '', [], ['POST'])); + return $routes; } } diff --git a/src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncConsumer.php b/src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncConsumer.php new file mode 100644 index 0000000..a506bfe --- /dev/null +++ b/src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncConsumer.php @@ -0,0 +1,116 @@ +host, + $this->port, + $this->user, + $this->password, + $this->vhost, + false, + 'AMQPLAIN', + null, + 'en_US', + $this->connectionTimeout, + $this->ioTimeout, + null, + false, + $this->heartbeat + ); + + $channel = $connection->channel(); + $this->topology->declareTopology($channel); + $channel->basic_qos(0, 1, false); + + $channel->basic_consume( + $this->queue, + '', + false, + false, + false, + false, + function (AMQPMessage $msg) use ($runSync): void { + try { + $data = json_decode($msg->getBody(), true, 512, JSON_THROW_ON_ERROR); + } catch (\JsonException $e) { + error_log('[RabbitMQ] Invalid JSON, sending to DLQ: ' . $e->getMessage()); + $msg->nack(false); + + return; + } + + if (!is_array($data) || ($data['type'] ?? '') !== 'sync_plans') { + error_log('[RabbitMQ] Unknown job type, sending to DLQ'); + $msg->nack(false); + + return; + } + + try { + $runSync(); + $msg->ack(); + } catch (\Throwable $e) { + error_log(sprintf( + '[RabbitMQ] Sync failed (%s), sending to DLQ: %s', + $data['correlation_id'] ?? 
'?', + $e->getMessage() + )); + $msg->nack(false); + } + } + ); + + while (true) { + $channel->wait(); + } + } +} diff --git a/src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncJobPublisher.php b/src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncJobPublisher.php new file mode 100644 index 0000000..32b6f6d --- /dev/null +++ b/src/events/Infrastructure/Messaging/RabbitMq/AmqpPlanSyncJobPublisher.php @@ -0,0 +1,137 @@ +host, + $this->port, + $this->user, + $this->password, + $this->vhost, + false, + 'AMQPLAIN', + null, + 'en_US', + $this->connectionTimeout, + $this->ioTimeout, + null, + false, + 30 + ); + + $channel = $connection->channel(); + + try { + $this->declareTopology($channel); + + $payload = json_encode([ + 'v' => 1, + 'type' => 'sync_plans', + 'requested_at' => (new \DateTimeImmutable('now', new \DateTimeZone('UTC')))->format(\DateTimeInterface::ATOM), + 'correlation_id' => $correlationId, + ], JSON_THROW_ON_ERROR); + + $message = new AMQPMessage($payload, [ + 'content_type' => 'application/json', + 'delivery_mode' => AMQPMessage::DELIVERY_MODE_PERSISTENT, + 'correlation_id' => $correlationId, + ]); + + $channel->basic_publish($message, $this->exchange, $this->routingKey); + } finally { + $channel->close(); + $connection->close(); + } + + return $correlationId; + } + + public function declareTopology(AMQPChannel $channel): void + { + $channel->exchange_declare($this->exchange, 'topic', false, true, false); + $channel->exchange_declare($this->deadLetterExchange, 'topic', false, true, false); + + $channel->queue_declare($this->deadLetterQueue, false, true, false, false, false); + $channel->queue_bind($this->deadLetterQueue, $this->deadLetterExchange, $this->deadLetterRoutingKey); + + $channel->queue_declare( + $this->queue, + false, + true, + false, + false, + false, + new AMQPTable([ + 'x-dead-letter-exchange' => $this->deadLetterExchange, + 'x-dead-letter-routing-key' => $this->deadLetterRoutingKey, + ]) + ); + + 
$channel->queue_bind($this->queue, $this->exchange, $this->routingKey); + } +} diff --git a/src/events/Infrastructure/Persistence/Cache/RedisCache.php b/src/events/Infrastructure/Persistence/Cache/RedisCache.php index 909bbaa..2315336 100644 --- a/src/events/Infrastructure/Persistence/Cache/RedisCache.php +++ b/src/events/Infrastructure/Persistence/Cache/RedisCache.php @@ -6,6 +6,9 @@ class RedisCache { + /** Monotonic counter bumped after a successful sync; search keys include this to avoid KEYS * and full-prefix deletes. */ + public const KEY_SEARCH_CACHE_EPOCH = 'search:epoch'; + private Client $client; public function __construct(string $host = 'redis', int $port = 6379) @@ -59,6 +62,34 @@ public function exists(string $key): bool ); } + public function getSearchCacheEpoch(): int + { + $v = $this->get(self::KEY_SEARCH_CACHE_EPOCH); + + return $v === null || $v === '' ? 0 : (int) $v; + } + + public function bumpSearchCacheEpoch(): void + { + $this->executeWithFallback( + fn() => $this->client->incr(self::KEY_SEARCH_CACHE_EPOCH), + 'Incr', + self::KEY_SEARCH_CACHE_EPOCH + ); + } + + /** + * SET key value EX ttl NX — returns true if this client acquired the lock. 
+ */ + public function setIfNotExists(string $key, string $value, int $ttlSeconds): bool + { + return (bool) $this->executeWithFallback(function () use ($key, $value, $ttlSeconds) { + $result = $this->client->set($key, $value, 'EX', $ttlSeconds, 'NX'); + + return $result !== null; + }, 'SetNx', $key, false); + } + private function executeWithFallback(callable $operation, string $operationName, string $key, mixed $fallback = null): mixed { try { diff --git a/src/events/Infrastructure/Persistence/Doctrine/Repository/DoctrinePlanRepository.php b/src/events/Infrastructure/Persistence/Doctrine/Repository/DoctrinePlanRepository.php index 8bbb4f8..7840fc9 100644 --- a/src/events/Infrastructure/Persistence/Doctrine/Repository/DoctrinePlanRepository.php +++ b/src/events/Infrastructure/Persistence/Doctrine/Repository/DoctrinePlanRepository.php @@ -146,11 +146,15 @@ private function toDomainEntity(BasePlanEntity $entity): BasePlan ); } + /** + * One SQL round-trip: plans + base_plans + MIN/MAX(zone prices). No per-plan queries (no N+1). + * Index support: partial idx on (plan_start_date, plan_end_date) for active plans; zones by plan_id for aggregates. + */ public function findByDateRange(\DateTime $startsAt, \DateTime $endsAt): array { try { $em = $this->getEntityManager(); - + $sql = " SELECT p.plan_id,