diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 97d1c06..721f5e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -102,7 +102,7 @@ jobs: psql -h localhost -p 5434 -U postgres -d target_db </dev/null || true + $PG -c "SELECT pgclone.clear_jobs();" 2>/dev/null || true $PG </dev/null DROP TABLE IF EXISTS public.simple_test CASCADE; DROP TABLE IF EXISTS public.simple_test_copy CASCADE; @@ -218,10 +218,10 @@ jobs: # 1: table_async basic echo "---- Async: table_async basic ----" - JOB=$($PG -tAc "SELECT pgclone_table_async('${SOURCE_CONNINFO}', 'public', 'simple_test', true);") + JOB=$($PG -tAc "SELECT pgclone.table_async('${SOURCE_CONNINFO}', 'public', 'simple_test', true);") run_test "returns job_id" "[ -n '$JOB' ] && [ '$JOB' -gt 0 ]" for i in $(seq 1 30); do - S=$($PG -tAc "SELECT status FROM pgclone_jobs_view WHERE job_id=$JOB;" 2>/dev/null | tr -d '[:space:]') + S=$($PG -tAc "SELECT status FROM pgclone.jobs_view WHERE job_id=$JOB;" 2>/dev/null | tr -d '[:space:]') [ "$S" = "completed" ] || [ "$S" = "failed" ] && break; sleep 1 done run_test "job completed" "[ '$S' = 'completed' ]" @@ -230,9 +230,9 @@ jobs: # 2: table_async with target name echo "---- Async: table_async target name ----" - JOB2=$($PG -tAc "SELECT pgclone_table_async('${SOURCE_CONNINFO}', 'public', 'simple_test', true, 'async_renamed');") + JOB2=$($PG -tAc "SELECT pgclone.table_async('${SOURCE_CONNINFO}', 'public', 'simple_test', true, 'async_renamed');") for i in $(seq 1 30); do - S2=$($PG -tAc "SELECT status FROM pgclone_jobs_view WHERE job_id=$JOB2;" 2>/dev/null | tr -d '[:space:]') + S2=$($PG -tAc "SELECT status FROM pgclone.jobs_view WHERE job_id=$JOB2;" 2>/dev/null | tr -d '[:space:]') [ "$S2" = "completed" ] || [ "$S2" = "failed" ] && break; sleep 1 done run_test "renamed job completed" "[ '$S2' = 'completed' ]" @@ -242,9 +242,9 @@ jobs: # 3: schema_async echo "---- Async: schema_async ----" $PG -c "DROP SCHEMA IF EXISTS test_schema CASCADE;" 
2>/dev/null || true - JOB3=$($PG -tAc "SELECT pgclone_schema_async('${SOURCE_CONNINFO}', 'test_schema', true);") + JOB3=$($PG -tAc "SELECT pgclone.schema_async('${SOURCE_CONNINFO}', 'test_schema', true);") for i in $(seq 1 60); do - S3=$($PG -tAc "SELECT status FROM pgclone_jobs_view WHERE job_id=$JOB3;" 2>/dev/null | tr -d '[:space:]') + S3=$($PG -tAc "SELECT status FROM pgclone.jobs_view WHERE job_id=$JOB3;" 2>/dev/null | tr -d '[:space:]') [ "$S3" = "completed" ] || [ "$S3" = "failed" ] && break; sleep 1 done run_test "schema_async completed" "[ '$S3' = 'completed' ]" @@ -253,16 +253,16 @@ jobs: # 4: progress/jobs/view echo "---- Async: progress & jobs ----" - PR=$($PG -tAc "SELECT pgclone_progress($JOB);" 2>/dev/null) + PR=$($PG -tAc "SELECT pgclone.progress($JOB);" 2>/dev/null) run_test "progress returns JSON" "echo '$PR' | grep -q 'job_id'" - JJ=$($PG -tAc "SELECT pgclone_jobs();" 2>/dev/null) + JJ=$($PG -tAc "SELECT pgclone.jobs();" 2>/dev/null) run_test "jobs returns JSON" "echo '$JJ' | grep -q 'job_id'" - VC=$($PG -tAc "SELECT count(*) FROM pgclone_jobs_view;" 2>/dev/null | tr -d '[:space:]') + VC=$($PG -tAc "SELECT count(*) FROM pgclone.jobs_view;" 2>/dev/null | tr -d '[:space:]') run_test "jobs_view has rows" "[ '$VC' -ge 1 ]" # 5: clear_jobs echo "---- Async: clear_jobs ----" - CL=$($PG -tAc "SELECT pgclone_clear_jobs();" 2>/dev/null | tr -d '[:space:]') + CL=$($PG -tAc "SELECT pgclone.clear_jobs();" 2>/dev/null | tr -d '[:space:]') run_test "clear_jobs works" "[ '$CL' -ge 1 ]" echo "" diff --git a/CHANGELOG.md b/CHANGELOG.md index 1215529..7bbb98e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,37 @@ All notable changes to pgclone are documented in this file. 
+## [4.0.0] — BREAKING + +### Changed +- **Schema namespace**: All pgclone functions now live under the `pgclone` schema, created automatically by the extension + - `pgclone_table(...)` → `pgclone.table(...)` + - `pgclone_schema(...)` → `pgclone.schema(...)` + - `pgclone_database(...)` → `pgclone.database(...)` + - `pgclone_database_create(...)` → `pgclone.database_create(...)` + - `pgclone_table_async(...)` → `pgclone.table_async(...)` + - `pgclone_schema_async(...)` → `pgclone.schema_async(...)` + - `pgclone_progress(...)` → `pgclone.progress(...)` + - `pgclone_cancel(...)` → `pgclone.cancel(...)` + - `pgclone_resume(...)` → `pgclone.resume(...)` + - `pgclone_jobs()` → `pgclone.jobs()` + - `pgclone_clear_jobs()` → `pgclone.clear_jobs()` + - `pgclone_progress_detail()` → `pgclone.progress_detail()` + - `pgclone_jobs_view` → `pgclone.jobs_view` + - `pgclone_discover_sensitive(...)` → `pgclone.discover_sensitive(...)` + - `pgclone_mask_in_place(...)` → `pgclone.mask_in_place(...)` + - `pgclone_create_masking_policy(...)` → `pgclone.create_masking_policy(...)` + - `pgclone_drop_masking_policy(...)` → `pgclone.drop_masking_policy(...)` + - `pgclone_clone_roles(...)` → `pgclone.clone_roles(...)` + - `pgclone_verify(...)` → `pgclone.verify(...)` + - `pgclone_masking_report(...)` → `pgclone.masking_report(...)` + - `pgclone_version()` → `pgclone.version()` + - `pgclone_table_ex(...)` → `pgclone.table_ex(...)` + - `pgclone_schema_ex(...)` → `pgclone.schema_ex(...)` + - `pgclone_functions(...)` → `pgclone.functions(...)` +- Extension control file now specifies `schema = pgclone` +- **Upgrade path**: This is a breaking change. Users must `DROP EXTENSION pgclone; CREATE EXTENSION pgclone;` to upgrade from v3.x. All application queries must be updated to use the new `pgclone.` prefix. 
+ ## [3.6.0] ### Added diff --git a/META.json b/META.json index d86d1be..f459ba6 100644 --- a/META.json +++ b/META.json @@ -2,14 +2,14 @@ "name": "pgclone", "abstract": "Clone PostgreSQL databases, schemas, tables between staging, test, dev and prod environments", "description": "PostgreSQL extension for easily cloning your DB, Schemas, Tables and more between environments", - "version": "3.6.0", + "version": "4.0.0", "maintainer": "Valeh Agayev ", "license": "postgresql", "provides": { "pgclone": { "abstract": "Clone PostgreSQL databases, schemas, and tables across environments", - "file": "sql/pgclone--3.6.0.sql", - "version": "3.6.0" + "file": "sql/pgclone--4.0.0.sql", + "version": "4.0.0" } }, "prereqs": { diff --git a/README.md b/README.md index d4a4a7c..13bfb35 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![CI](https://github.com/valehdba/pgclone/actions/workflows/ci.yml/badge.svg)](https://github.com/valehdba/pgclone/actions/workflows/ci.yml) [![Postgres 14–18](https://img.shields.io/badge/Postgres-14%E2%80%9318-336791?logo=postgresql&logoColor=white)](https://github.com/valehdba/pgclone) [![License](https://img.shields.io/badge/License-PostgreSQL-blue.svg)](https://github.com/valehdba/pgclone/blob/main/LICENSE) -[![Version](https://img.shields.io/badge/version-3.6.0-orange)](https://github.com/valehdba/pgclone/releases/tag/v3.6.0) +[![Version](https://img.shields.io/badge/version-4.0.0-orange)](https://github.com/valehdba/pgclone/releases/tag/v4.0.0) A PostgreSQL extension that clones databases, schemas, tables, and functions between PostgreSQL instances — directly from SQL. No `pg_dump`, no `pg_restore`, no shell scripts. 
@@ -36,19 +36,19 @@ sudo make install CREATE EXTENSION pgclone; -- Clone a table -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres password=secret', 'public', 'customers', true ); -- Clone an entire schema -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=source-server dbname=mydb user=postgres password=secret', 'sales', true ); -- Clone a full database -SELECT pgclone_database( +SELECT pgclone.database( 'host=source-server dbname=mydb user=postgres password=secret', true ); @@ -96,7 +96,7 @@ sudo make install PG_CONFIG=/usr/lib/postgresql/18/bin/pg_config ```sql CREATE EXTENSION pgclone; -SELECT pgclone_version(); +SELECT pgclone.version(); ``` For async operations, add to `postgresql.conf` and restart: @@ -127,8 +127,8 @@ pgclone uses Unix domain sockets for local loopback connections, so the default - [x] v1.1.0: Selective column cloning and data filtering - [x] v1.2.0: Materialized views and exclusion constraints - [x] v2.0.0: True multi-worker parallel cloning - - [x] v2.0.1: `pgclone_database_create()` — create + clone database - - [x] v2.1.0: Progress tracking view (`pgclone_jobs_view`) + - [x] v2.0.1: `pgclone.database_create()` — create + clone database + - [x] v2.1.0: Progress tracking view (`pgclone.jobs_view`) - [x] v2.1.1: Visual progress bar - [x] v2.1.3: Elapsed time tracking - [x] v2.1.4: Unix domain socket auth (no more pg_hba.conf trust requirement) @@ -141,7 +141,8 @@ pgclone uses Unix domain sockets for local loopback connections, so the default - [x] v3.4.0: Clone roles with permissions and passwords - [x] v3.5.0: Clone verification — compare row counts across source and target - [x] v3.6.0: GDPR/Compliance masking report -- [ ] v4.0.0: Copy-on-Write (CoW) mode for local cloning +- [x] v4.0.0: Schema namespace — all functions under `pgclone` schema (`pgclone.table()`, `pgclone.schema()`, etc.) 
+- [ ] v5.0.0: Copy-on-Write (CoW) mode for local cloning ## License diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 9e44c2c..f6958da 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -128,7 +128,7 @@ typedef struct PgcloneSharedState { - Allocated once during `_PG_init()` via shared memory hooks - Protected by a lightweight lock (`LWLock`) for concurrent access -- Read by `pgclone_progress()`, `pgclone_jobs()`, and `pgclone_jobs_view` +- Read by `pgclone.progress()`, `pgclone.jobs()`, and `pgclone.jobs_view` - Written by background workers as they progress --- @@ -189,7 +189,7 @@ PostgreSQL 17 removed the `die` signal handler, replacing it with `SignalHandler ## Background Worker Lifecycle -1. **Registration:** `pgclone_table_async()` or `pgclone_schema_async()` allocates a job slot in shared memory, populates connection info and parameters, then calls `RegisterDynamicBackgroundWorker()`. +1. **Registration:** `pgclone.table_async()` or `pgclone.schema_async()` allocates a job slot in shared memory, populates connection info and parameters, then calls `RegisterDynamicBackgroundWorker()`. 2. **Startup:** The worker process starts via `pgclone_bgw_main()`, which: - Sets up signal handlers @@ -198,7 +198,7 @@ PostgreSQL 17 removed the `die` signal handler, replacing it with `SignalHandler 3. **Execution:** The worker calls the same core clone functions used by sync operations, with periodic updates to shared memory (rows copied, current table, elapsed time). -4. **Worker Pool mode (v2.2.0):** For `pgclone_schema_async` with `"parallel": N`, the parent process: +4. 
**Worker Pool mode (v2.2.0):** For `pgclone.schema_async` with `"parallel": N`, the parent process: - Queries the source for the list of tables - Populates a shared-memory task queue (`PgclonePoolQueue`) - Launches exactly N pool workers via `pgclone_pool_worker_main()` diff --git a/docs/ASYNC.md b/docs/ASYNC.md index ce37a18..fb5a11c 100644 --- a/docs/ASYNC.md +++ b/docs/ASYNC.md @@ -31,21 +31,21 @@ Without `shared_preload_libraries`, async functions will not be available. ```sql -- Returns a job_id (integer) -SELECT pgclone_table_async( +SELECT pgclone.table_async( 'host=source-server dbname=mydb user=postgres', 'public', 'large_table', true ); -- Returns: 1 ``` -All options available for `pgclone_table` also work with `pgclone_table_async`, including target name, JSON options, conflict strategy, selective columns, and WHERE filters. +All options available for `pgclone.table` also work with `pgclone.table_async`, including target name, JSON options, conflict strategy, selective columns, and WHERE filters. --- ## Async Schema Clone ```sql -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=source-server dbname=mydb user=postgres', 'sales', true ); @@ -59,14 +59,14 @@ Clone tables in parallel using a fixed-size worker pool. Instead of spawning one ```sql -- Clone schema with 4 pool workers -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=source-server dbname=mydb user=postgres', 'sales', true, '{"parallel": 4}' ); -- Combine parallel with other options -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=source-server dbname=mydb user=postgres', 'sales', true, '{"parallel": 8, "conflict": "replace", "triggers": false}' @@ -95,7 +95,7 @@ SELECT pgclone_schema_async( - Maximum 512 tables per pool operation (`PGCLONE_MAX_POOL_TASKS`). - Only one pool operation can run at a time per database cluster. -- Pool workers are visible in `pgclone_jobs_view` as individual table-type jobs. 
+- Pool workers are visible in `pgclone.jobs_view` as individual table-type jobs. --- @@ -104,7 +104,7 @@ SELECT pgclone_schema_async( ### Check a specific job ```sql -SELECT pgclone_progress(1); +SELECT pgclone.progress(1); ``` Returns JSON: @@ -125,7 +125,7 @@ Returns JSON: ### List all jobs ```sql -SELECT pgclone_jobs(); +SELECT pgclone.jobs(); -- Returns JSON array of all active/recent jobs ``` @@ -136,7 +136,7 @@ SELECT pgclone_jobs(); Query live progress of all async clone jobs as a standard PostgreSQL view with visual progress bar and elapsed time: ```sql -SELECT job_id, status, schema_name, progress_bar FROM pgclone_jobs_view; +SELECT job_id, status, schema_name, progress_bar FROM pgclone.jobs_view; ``` ``` @@ -152,22 +152,22 @@ SELECT job_id, status, schema_name, progress_bar FROM pgclone_jobs_view; ```sql -- Running jobs with elapsed time SELECT job_id, status, elapsed_time, pct_complete -FROM pgclone_jobs_view +FROM pgclone.jobs_view WHERE status = 'running'; -- Failed jobs with error messages SELECT job_id, schema_name, error_message -FROM pgclone_jobs_view +FROM pgclone.jobs_view WHERE status = 'failed'; ``` ### Full detail ```sql -SELECT * FROM pgclone_jobs_view; +SELECT * FROM pgclone.jobs_view; -- Or via the underlying function: -SELECT * FROM pgclone_progress_detail(); +SELECT * FROM pgclone.progress_detail(); ``` --- @@ -177,7 +177,7 @@ SELECT * FROM pgclone_progress_detail(); ### Cancel a running job ```sql -SELECT pgclone_cancel(1); +SELECT pgclone.cancel(1); ``` ### Resume a failed job @@ -185,14 +185,14 @@ SELECT pgclone_cancel(1); Resumes from the last checkpoint, returns a new job_id: ```sql -SELECT pgclone_resume(1); +SELECT pgclone.resume(1); -- Returns: 2 ``` ### Clear completed/failed jobs ```sql -SELECT pgclone_clear_jobs(); +SELECT pgclone.clear_jobs(); -- Returns: number of jobs cleared ``` @@ -204,15 +204,15 @@ All conflict strategies work with async functions: ```sql -- Skip if table exists -SELECT pgclone_table_async(conn, 
'public', 'orders', true, 'orders', +SELECT pgclone.table_async(conn, 'public', 'orders', true, 'orders', '{"conflict": "skip"}'); -- Drop and re-create -SELECT pgclone_table_async(conn, 'public', 'orders', true, 'orders', +SELECT pgclone.table_async(conn, 'public', 'orders', true, 'orders', '{"conflict": "replace"}'); -- Rename existing table -SELECT pgclone_table_async(conn, 'public', 'orders', true, 'orders', +SELECT pgclone.table_async(conn, 'public', 'orders', true, 'orders', '{"conflict": "rename"}'); ``` @@ -223,7 +223,7 @@ SELECT pgclone_table_async(conn, 'public', 'orders', true, 'orders', 1. When you call an async function, pgclone registers a background worker with PostgreSQL's `BackgroundWorker` API. 2. The background worker starts in a separate process, connects to both source and target databases using `libpq`, and performs the clone operation. 3. Progress is tracked in shared memory (`pgclone_state`), which is allocated via `shmem_request_hook` (PG 15+) or `RequestAddinShmemSpace` (PG 14). -4. The `pgclone_jobs_view` reads shared memory to display real-time progress. +4. The `pgclone.jobs_view` reads shared memory to display real-time progress. 5. For parallel cloning (v2.2.0+), the parent process populates a shared-memory task queue and launches exactly N pool workers. Each worker pulls tasks from the queue until it's empty — providing dynamic load balancing with O(N) resource usage. **Tip:** Verbose per-table/per-row NOTICE messages have been moved to DEBUG1 level. To see them: diff --git a/docs/TESTING.md b/docs/TESTING.md index 4117bad..79b0886 100644 --- a/docs/TESTING.md +++ b/docs/TESTING.md @@ -46,19 +46,19 @@ Located in `test/test_loopback.sh`. These tests call functions that use loopback ### 3. Database Create Tests (7 tests) -Located in `test/test_database_create.sh` — verifies `pgclone_database_create()` creates a new database, clones data into it, and supports idempotent re-cloning. 
+Located in `test/test_database_create.sh` — verifies `pgclone.database_create()` creates a new database, clones data into it, and supports idempotent re-cloning. ### 4. Async Tests (21 tests) Located in `test/test_async.sh` — covers background worker operations (8 tests): -- TEST 1: `pgclone_table_async` — basic async table clone -- TEST 2: `pgclone_table_async` — with different target name -- TEST 3: `pgclone_schema_async` — sequential mode -- TEST 4: `pgclone_progress` — check progress JSON -- TEST 5: `pgclone_jobs` — list all jobs -- TEST 6: `pgclone_jobs_view` — progress tracking view -- TEST 7: `pgclone_clear_jobs` — cleanup completed/failed jobs +- TEST 1: `pgclone.table_async` — basic async table clone +- TEST 2: `pgclone.table_async` — with different target name +- TEST 3: `pgclone.schema_async` — sequential mode +- TEST 4: `pgclone.progress` — check progress JSON +- TEST 5: `pgclone.jobs` — list all jobs +- TEST 6: `pgclone.jobs_view` — progress tracking view +- TEST 7: `pgclone.clear_jobs` — cleanup completed/failed jobs - TEST 8: Worker Pool — parallel schema clone with pool workers (v2.2.0) --- @@ -186,7 +186,7 @@ SELECT plan(67); -- increment after adding your test -- Your new test SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true)', + format('SELECT pgclone.table(%L, %L, %L, true)', current_setting('app.source_conninfo'), 'public', 'new_table'), 'description of what this tests' @@ -200,7 +200,7 @@ SELECT lives_ok( Add to `test/test_loopback.sh` for functions that run DDL via loopback: ```bash -RESULT=$(pg "SELECT pgclone_your_function(...);" || echo "ERROR") +RESULT=$(pg "SELECT pgclone.your_function(...);" || echo "ERROR") run_test "description" "[ '$RESULT' != 'ERROR' ]" ``` diff --git a/docs/TESTING_MANUAL.md b/docs/TESTING_MANUAL.md index f7a01db..be22ef4 100644 --- a/docs/TESTING_MANUAL.md +++ b/docs/TESTING_MANUAL.md @@ -43,7 +43,7 @@ This creates `test_schema` with tables (`customers`, `orders`, `order_items`, `e psql -h 
172.17.0.3 -p 5433 -U postgres -d db2 CREATE EXTENSION IF NOT EXISTS pgclone; -SELECT pgclone_version(); +SELECT pgclone.version(); --- Expected: pgclone 3.6.0 +-- Expected: pgclone 4.0.0 ``` @@ -52,7 +52,7 @@ SELECT pgclone_version(); ## Test 1: Extension Installation & Version ```sql -SELECT pgclone_version(); +SELECT pgclone.version(); ``` -**Expected:** `pgclone 3.6.0` +**Expected:** `pgclone 4.0.0` @@ -70,7 +70,7 @@ SELECT * FROM pg_extension WHERE extname = 'pgclone'; ### 2.1 Clone table with data ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true ); @@ -87,7 +87,7 @@ SELECT COUNT(*) FROM test_schema.customers; ### 2.2 Clone structure only (no data) ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'orders', false ); @@ -102,7 +102,7 @@ SELECT COUNT(*) FROM test_schema.orders; ### 2.3 Clone with different target name ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers_backup' @@ -118,7 +118,7 @@ SELECT COUNT(*) FROM test_schema.customers_backup; ### 2.4 Selective column cloning ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers_lite', @@ -137,7 +137,7 @@ ORDER BY ordinal_position; ### 2.5 Clone with WHERE filter ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'active_customers_copy', @@ -155,7 +155,7 @@ SELECT DISTINCT status FROM test_schema.active_customers_copy; ### 2.6 Columns + WHERE combined ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'high_scorers', @@ -178,7 +178,7 @@ SELECT
* FROM test_schema.high_scorers ORDER BY score DESC; ```sql DROP TABLE IF EXISTS test_schema.customers CASCADE; -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers', @@ -199,7 +199,7 @@ WHERE schemaname = 'test_schema' AND tablename = 'customers'; DROP TABLE IF EXISTS test_schema.order_items CASCADE; DROP TABLE IF EXISTS test_schema.orders CASCADE; -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'orders', true, 'orders', @@ -219,7 +219,7 @@ WHERE conrelid = 'test_schema.orders'::regclass; ```sql DROP TABLE IF EXISTS test_schema.orders CASCADE; -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'orders', true, 'orders', @@ -234,12 +234,12 @@ WHERE tgrelid = 'test_schema.orders'::regclass AND NOT tgisinternal; **Expected:** No triggers -### 3.4 pgclone_table_ex() with boolean parameters +### 3.4 pgclone.table_ex() with boolean parameters ```sql DROP TABLE IF EXISTS test_schema.orders CASCADE; -SELECT pgclone_table_ex( +SELECT pgclone.table_ex( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'orders', true, 'orders', false, -- skip indexes @@ -264,7 +264,7 @@ SELECT tgname FROM pg_trigger WHERE tgrelid = 'test_schema.orders'::regclass AND ```sql -- Table already exists from previous tests -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true ); @@ -275,7 +275,7 @@ SELECT pgclone_table( ### 4.2 Skip ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers', @@ -288,7 +288,7 @@ SELECT pgclone_table( ### 4.3 Replace ```sql -SELECT pgclone_table( +SELECT 
pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers', @@ -305,7 +305,7 @@ SELECT COUNT(*) FROM test_schema.customers; ### 4.4 Rename ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers', @@ -330,7 +330,7 @@ ORDER BY tablename; ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true ); @@ -378,7 +378,7 @@ SELECT indexname FROM pg_indexes WHERE schemaname = 'test_schema' ORDER BY index ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', false ); @@ -396,7 +396,7 @@ UNION ALL SELECT 'orders', COUNT(*) FROM test_schema.orders; ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true, '{"triggers": false, "indexes": false}' @@ -412,12 +412,12 @@ WHERE n.nspname = 'test_schema' AND NOT t.tgisinternal; **Expected:** Trigger count = 0. Only PK indexes present. 
-### 5.4 pgclone_schema_ex() with boolean parameters +### 5.4 pgclone.schema_ex() with boolean parameters ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema_ex( +SELECT pgclone.schema_ex( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true, true, -- include indexes @@ -436,7 +436,7 @@ SELECT pgclone_schema_ex( DROP SCHEMA IF EXISTS test_schema CASCADE; CREATE SCHEMA test_schema; -SELECT pgclone_functions( +SELECT pgclone.functions( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema' ); @@ -459,7 +459,7 @@ WHERE routine_schema = 'test_schema' ORDER BY routine_name; DROP SCHEMA IF EXISTS test_schema CASCADE; DROP TABLE IF EXISTS public.simple_test CASCADE; -SELECT pgclone_database( +SELECT pgclone.database( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', true ); @@ -480,7 +480,7 @@ GROUP BY schemaname ORDER BY schemaname; DROP SCHEMA IF EXISTS test_schema CASCADE; DROP TABLE IF EXISTS public.simple_test CASCADE; -SELECT pgclone_database( +SELECT pgclone.database( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', true, '{"triggers": false}' @@ -489,7 +489,7 @@ SELECT pgclone_database( **Expected:** All data cloned, no triggers on any table. 
-### 7.3 Clone into a new database (pgclone_database_create) +### 7.3 Clone into a new database (pgclone.database_create) > **Run from the `postgres` database on Server 2, not `db2`.** @@ -498,7 +498,7 @@ psql -h 172.17.0.3 -p 5433 -U postgres -d postgres CREATE EXTENSION IF NOT EXISTS pgclone; -SELECT pgclone_database_create( +SELECT pgclone.database_create( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'db1_clone', true @@ -525,7 +525,7 @@ Masking is applied server-side during COPY — source data never stored unmasked ### 8.1 Email masking ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_email_masked', @@ -540,7 +540,7 @@ SELECT id, full_name, email FROM test_schema.emp_email_masked ORDER BY id; ### 8.2 Name masking ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_name_masked', @@ -555,7 +555,7 @@ SELECT id, full_name FROM test_schema.emp_name_masked ORDER BY id; ### 8.3 Phone masking ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_phone_masked', @@ -570,7 +570,7 @@ SELECT id, phone FROM test_schema.emp_phone_masked ORDER BY id; ### 8.4 Hash masking (MD5) ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_hash_masked', @@ -585,7 +585,7 @@ SELECT id, email FROM test_schema.emp_hash_masked ORDER BY id; ### 8.5 Null masking ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_null_masked', @@ -600,7 +600,7 @@ SELECT id, ssn FROM test_schema.emp_null_masked ORDER BY id; ### 8.6 Partial masking 
```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_partial_masked', @@ -615,7 +615,7 @@ SELECT id, full_name FROM test_schema.emp_partial_masked ORDER BY id; ### 8.7 Random integer masking ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_rand_masked', @@ -630,7 +630,7 @@ SELECT id, salary FROM test_schema.emp_rand_masked ORDER BY id; ### 8.8 Constant masking ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_const_masked', @@ -645,7 +645,7 @@ SELECT id, notes FROM test_schema.emp_const_masked ORDER BY id; ### 8.9 Multiple masks combined ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_full_masked', @@ -662,7 +662,7 @@ SELECT * FROM test_schema.emp_full_masked ORDER BY id; ## Test 9: Auto-Discovery of Sensitive Data (v3.1.0) ```sql -SELECT pgclone_discover_sensitive( +SELECT pgclone.discover_sensitive( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema' ); @@ -687,7 +687,7 @@ Apply masking to an already-existing local table. 
```sql DROP TABLE IF EXISTS test_schema.emp_for_static_mask; -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_for_static_mask' @@ -702,7 +702,7 @@ SELECT full_name, email, ssn FROM test_schema.emp_for_static_mask ORDER BY id; ### 10.2 Apply in-place masking ```sql -SELECT pgclone_mask_in_place( +SELECT pgclone.mask_in_place( 'test_schema', 'emp_for_static_mask', '{"email": "email", "full_name": "name", "ssn": "null"}' ); @@ -727,7 +727,7 @@ Role-based masking policies that preserve original data. ```sql DROP TABLE IF EXISTS test_schema.employees CASCADE; -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true ); @@ -743,7 +743,7 @@ END $$; ### 11.2 Create masking policy ```sql -SELECT pgclone_create_masking_policy( +SELECT pgclone.create_masking_policy( 'test_schema', 'employees', '{"email": "email", "full_name": "name", "ssn": "null", "salary": {"type": "random_int", "min": 40000, "max": 200000}}', 'data_admin' @@ -781,7 +781,7 @@ RESET ROLE; ### 11.6 Drop masking policy ```sql -SELECT pgclone_drop_masking_policy('test_schema', 'employees'); +SELECT pgclone.drop_masking_policy('test_schema', 'employees'); SELECT viewname FROM pg_views WHERE schemaname = 'test_schema' AND viewname = 'employees_masked'; @@ -796,7 +796,7 @@ WHERE schemaname = 'test_schema' AND viewname = 'employees_masked'; ### 12.1 Clone all roles ```sql -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654' ); ``` @@ -817,7 +817,7 @@ ORDER BY rolname; DROP ROLE IF EXISTS test_reader; DROP ROLE IF EXISTS test_writer; -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_reader, test_writer' ); @@ -843,7 +843,7 @@ ORDER BY table_name, privilege_type; ### 
12.4 Existing role update ```sql -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_admin' ); @@ -860,7 +860,7 @@ SELECT pgclone_clone_roles( ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true ); @@ -869,7 +869,7 @@ SELECT pgclone_schema( ### 13.2 Verify specific schema ```sql -SELECT * FROM pgclone_verify( +SELECT * FROM pgclone.verify( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema' ); @@ -880,7 +880,7 @@ SELECT * FROM pgclone_verify( ### 13.3 Verify all schemas ```sql -SELECT * FROM pgclone_verify( +SELECT * FROM pgclone.verify( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654' ); ``` @@ -893,7 +893,7 @@ SELECT * FROM pgclone_verify( DELETE FROM test_schema.customers WHERE id > 5; ANALYZE test_schema.customers; -SELECT * FROM pgclone_verify( +SELECT * FROM pgclone.verify( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema' ); @@ -908,7 +908,7 @@ SELECT * FROM pgclone_verify( ### 14.1 Report on unmasked schema ```sql -SELECT * FROM pgclone_masking_report('test_schema'); +SELECT * FROM pgclone.masking_report('test_schema'); ``` **Expected:** Lists sensitive columns with sensitivity categories (`Email`, `PII - Name`, `Phone`, `Financial`, `National ID`), `mask_status = UNMASKED`, and recommended strategies. 
@@ -916,20 +916,20 @@ SELECT * FROM pgclone_masking_report('test_schema'); ### 14.2 Apply policy then re-check ```sql -SELECT pgclone_create_masking_policy( +SELECT pgclone.create_masking_policy( 'test_schema', 'employees', '{"email": "email", "full_name": "name", "ssn": "null"}', 'data_admin' ); -SELECT * FROM pgclone_masking_report('test_schema'); +SELECT * FROM pgclone.masking_report('test_schema'); ``` **Expected:** Employee columns now show `mask_status = MASKED (view)`. ```sql -- Cleanup -SELECT pgclone_drop_masking_policy('test_schema', 'employees'); +SELECT pgclone.drop_masking_policy('test_schema', 'employees'); ``` --- @@ -943,7 +943,7 @@ SELECT pgclone_drop_masking_policy('test_schema', 'employees'); ```sql DROP TABLE IF EXISTS test_schema.customers CASCADE; -SELECT pgclone_table_async( +SELECT pgclone.table_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true ); @@ -954,9 +954,9 @@ SELECT pgclone_table_async( ### 15.2 Check progress ```sql -SELECT pgclone_progress(1); +SELECT pgclone.progress(1); -SELECT * FROM pgclone_jobs_view; +SELECT * FROM pgclone.jobs_view; ``` **Expected:** Shows status (`pending`/`running`/`completed`), `rows_copied`, `progress_bar`, `elapsed_time`. @@ -966,12 +966,12 @@ SELECT * FROM pgclone_jobs_view; ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true ); -SELECT job_id, status, schema_name, progress_bar FROM pgclone_jobs_view; +SELECT job_id, status, schema_name, progress_bar FROM pgclone.jobs_view; ``` **Expected:** Schema clone progress with table-level tracking. @@ -979,7 +979,7 @@ SELECT job_id, status, schema_name, progress_bar FROM pgclone_jobs_view; ### 15.4 List all jobs ```sql -SELECT pgclone_jobs(); +SELECT pgclone.jobs(); ``` **Expected:** JSON array of all jobs. 
@@ -987,7 +987,7 @@ SELECT pgclone_jobs(); ### 15.5 Async with conflict strategy ```sql -SELECT pgclone_table_async( +SELECT pgclone.table_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'customers', @@ -1001,15 +1001,15 @@ SELECT pgclone_table_async( ```sql -- Start a clone and cancel it (use the returned job_id) -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true, '{"conflict": "replace"}' ); -SELECT pgclone_cancel(3); -- use actual job_id +SELECT pgclone.cancel(3); -- use actual job_id -SELECT status FROM pgclone_jobs_view WHERE job_id = 3; +SELECT status FROM pgclone.jobs_view WHERE job_id = 3; ``` **Expected:** `status = 'cancelled'` @@ -1017,9 +1017,9 @@ SELECT status FROM pgclone_jobs_view WHERE job_id = 3; ### 15.7 Clear completed jobs ```sql -SELECT pgclone_clear_jobs(); +SELECT pgclone.clear_jobs(); -SELECT * FROM pgclone_jobs_view; +SELECT * FROM pgclone.jobs_view; ``` **Expected:** Returns count of cleared jobs. Only running/pending jobs remain. @@ -1033,21 +1033,21 @@ SELECT * FROM pgclone_jobs_view; ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true, '{"parallel": 4}' ); SELECT job_id, status, op_type, table_name, progress_bar -FROM pgclone_jobs_view ORDER BY job_id; +FROM pgclone.jobs_view ORDER BY job_id; ``` **Expected:** Parent job + up to 4 pool worker jobs visible. 
```sql -- After completion, verify -SELECT * FROM pgclone_verify( +SELECT * FROM pgclone.verify( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema' ); @@ -1060,7 +1060,7 @@ SELECT * FROM pgclone_verify( ```sql DROP SCHEMA IF EXISTS test_schema CASCADE; -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true, '{"parallel": 4, "conflict": "replace", "triggers": false}' @@ -1074,23 +1074,23 @@ SELECT pgclone_schema_async( ## Test 17: Progress Tracking View ```sql -SELECT pgclone_schema_async( +SELECT pgclone.schema_async( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', true, '{"conflict": "replace"}' ); -- Detailed progress function -SELECT * FROM pgclone_progress_detail(); +SELECT * FROM pgclone.progress_detail(); -- Convenience view SELECT job_id, status, schema_name, table_name, pct_complete, progress_bar, elapsed_time -FROM pgclone_jobs_view; +FROM pgclone.jobs_view; -- Filter by status -SELECT job_id, status, elapsed_time FROM pgclone_jobs_view WHERE status = 'running'; -SELECT job_id, error_message FROM pgclone_jobs_view WHERE status = 'failed'; +SELECT job_id, status, elapsed_time FROM pgclone.jobs_view WHERE status = 'running'; +SELECT job_id, error_message FROM pgclone.jobs_view WHERE status = 'failed'; ``` **Expected:** All columns visible — `job_id`, `status`, `op_type`, `schema_name`, `table_name`, `current_phase`, `tables_total`, `tables_completed`, `rows_copied`, `bytes_copied`, `elapsed_ms`, `start_time`, `end_time`, `pct_complete`, `progress_bar`, `elapsed_time`. 
@@ -1102,7 +1102,7 @@ SELECT job_id, error_message FROM pgclone_jobs_view WHERE status = 'failed'; ### 18.1 Invalid connection string ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=192.168.99.99 dbname=nonexistent user=postgres password=wrong', 'public', 'test', true ); @@ -1113,7 +1113,7 @@ SELECT pgclone_table( ### 18.2 Non-existent table ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'this_table_does_not_exist', true ); @@ -1124,7 +1124,7 @@ SELECT pgclone_table( ### 18.3 Non-existent schema ```sql -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'nonexistent_schema', true ); @@ -1135,7 +1135,7 @@ SELECT pgclone_schema( ### 18.4 SQL injection in WHERE clause ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'injection_test', @@ -1148,7 +1148,7 @@ SELECT pgclone_table( ### 18.5 Invalid mask strategy ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'employees', true, 'emp_bad_mask', @@ -1161,7 +1161,7 @@ SELECT pgclone_table( ### 18.6 Invalid JSON options ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=172.17.0.2 port=5432 dbname=db1 user=postgres password=123654', 'test_schema', 'customers', true, 'json_test', @@ -1185,7 +1185,7 @@ DROP ROLE IF EXISTS test_writer; DROP ROLE IF EXISTS test_admin; DROP ROLE IF EXISTS data_admin; -SELECT pgclone_clear_jobs(); +SELECT pgclone.clear_jobs(); -- To drop the cloned database (connect to postgres db first): -- \c postgres diff --git a/docs/USAGE.md b/docs/USAGE.md index 0e0725d..68a08a8 100644 --- a/docs/USAGE.md +++ b/docs/USAGE.md @@ -54,7 +54,7 @@ postgresql://username:password@hostname:5432/database ### Clone a table with data ```sql -SELECT 
pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres password=secret', 'public', -- schema name 'customers', -- table name @@ -65,7 +65,7 @@ SELECT pgclone_table( ### Clone structure only (no data) ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres password=secret', 'public', 'customers', @@ -76,7 +76,7 @@ SELECT pgclone_table( ### Clone with a different target name ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres password=secret', 'public', 'customers', -- source table name @@ -92,7 +92,7 @@ SELECT pgclone_table( Clone an entire schema including tables, views, functions, sequences, materialized views, indexes, constraints, and triggers: ```sql -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=source-server dbname=mydb user=postgres password=secret', 'sales', -- schema to clone true -- include table data @@ -102,7 +102,7 @@ SELECT pgclone_schema( ### Clone only functions from a schema ```sql -SELECT pgclone_functions( +SELECT pgclone.functions( 'host=source-server dbname=mydb user=postgres password=secret', 'utils' -- schema containing functions ); @@ -117,7 +117,7 @@ SELECT pgclone_functions( Clone all user schemas from a remote database into the current database: ```sql -SELECT pgclone_database( +SELECT pgclone.database( 'host=source-server dbname=mydb user=postgres password=secret', true -- include data ); @@ -128,20 +128,20 @@ SELECT pgclone_database( Create a new local database and clone everything from a remote source. 
Run this from the `postgres` database: ```sql -SELECT pgclone_database_create( +SELECT pgclone.database_create( 'host=source-server dbname=production user=postgres password=secret', 'staging_db' -- target database name (created if not exists) ); -- Structure only -SELECT pgclone_database_create( +SELECT pgclone.database_create( 'host=source-server dbname=production user=postgres password=secret', 'staging_db', false -- include_data = false ); -- With options -SELECT pgclone_database_create( +SELECT pgclone.database_create( 'host=source-server dbname=production user=postgres password=secret', 'staging_db', true, @@ -161,21 +161,21 @@ By default, all indexes, constraints (PK, UNIQUE, CHECK, FK, EXCLUDE), and trigg ```sql -- Clone table without indexes and triggers -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres password=secret', 'public', 'orders', true, 'orders', '{"indexes": false, "triggers": false}' ); -- Clone schema without any constraints -SELECT pgclone_schema( +SELECT pgclone.schema( 'host=source-server dbname=mydb user=postgres password=secret', 'sales', true, '{"constraints": false}' ); -- Clone database without triggers -SELECT pgclone_database( +SELECT pgclone.database( 'host=source-server dbname=mydb user=postgres password=secret', true, '{"triggers": false}' @@ -185,9 +185,9 @@ SELECT pgclone_database( ### Boolean parameters format ```sql --- pgclone_table_ex(conninfo, schema, table, include_data, target_name, +-- pgclone.table_ex(conninfo, schema, table, include_data, target_name, -- include_indexes, include_constraints, include_triggers) -SELECT pgclone_table_ex( +SELECT pgclone.table_ex( 'host=source-server dbname=mydb user=postgres', 'public', 'orders', true, 'orders_copy', false, -- skip indexes @@ -195,9 +195,9 @@ SELECT pgclone_table_ex( false -- skip triggers ); --- pgclone_schema_ex(conninfo, schema, include_data, +-- pgclone.schema_ex(conninfo, schema, include_data, -- include_indexes, 
include_constraints, include_triggers) -SELECT pgclone_schema_ex( +SELECT pgclone.schema_ex( 'host=source-server dbname=mydb user=postgres', 'sales', true, true, -- include indexes @@ -213,7 +213,7 @@ SELECT pgclone_schema_ex( Clone only specific columns from a table: ```sql -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres', 'public', 'users', true, 'users_lite', '{"columns": ["id", "name", "email"]}' @@ -230,14 +230,14 @@ Clone only rows matching a condition: ```sql -- Clone only active users -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres', 'public', 'users', true, 'active_users', '{"where": "status = ''active''"}' ); -- Combine columns + WHERE + disable triggers -SELECT pgclone_table( +SELECT pgclone.table( 'host=source-server dbname=mydb user=postgres', 'public', 'orders', true, 'recent_orders', '{"columns": ["id", "customer_id", "total", "created_at"], @@ -254,22 +254,22 @@ Control what happens when a target table already exists: ```sql -- Error if exists (default) -SELECT pgclone_table(..., '{"conflict": "error"}'); +SELECT pgclone.table(..., '{"conflict": "error"}'); -- Skip if exists -SELECT pgclone_table(..., '{"conflict": "skip"}'); +SELECT pgclone.table(..., '{"conflict": "skip"}'); -- Drop and re-create -SELECT pgclone_table(..., '{"conflict": "replace"}'); +SELECT pgclone.table(..., '{"conflict": "replace"}'); -- Rename existing to tablename_old -SELECT pgclone_table(..., '{"conflict": "rename"}'); +SELECT pgclone.table(..., '{"conflict": "rename"}'); ``` Conflict strategy can be combined with other options: ```sql -SELECT pgclone_schema_async(conn, 'sales', true, +SELECT pgclone.schema_async(conn, 'sales', true, '{"conflict": "replace", "indexes": false, "triggers": false}'); ``` @@ -280,7 +280,7 @@ SELECT pgclone_schema_async(conn, 'sales', true, Materialized views are cloned automatically during schema clone, including their indexes and data. 
Disable with: ```sql -SELECT pgclone_schema(conn, 'analytics', true, +SELECT pgclone.schema(conn, 'analytics', true, '{"matviews": false}'); ``` @@ -300,23 +300,23 @@ Clone tables with column-level data anonymization. Masking is applied server-sid ```sql -- Mask email addresses: alice@example.com → a***@example.com -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"email": "email"}}'); -- Replace names with XXXX -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"full_name": "name"}}'); -- Keep last 4 digits of phone: +1-555-123-4567 → ***-4567 -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"phone": "phone"}}'); -- Deterministic MD5 hash (preserves referential integrity across tables) -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"email": "hash"}}'); -- Replace with NULL -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"ssn": "null"}}'); ``` @@ -325,15 +325,15 @@ SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', ```sql -- Partial masking: keep first 2 and last 3 chars -- "Johnson" → "Jo***son" -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"last_name": {"type": "partial", "prefix": 2, "suffix": 3}}}'); -- Random integer in range -SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"salary": {"type": "random_int", "min": 30000, "max": 150000}}}'); -- Fixed replacement value -SELECT 
pgclone_table(conn, 'public', 'users', true, 'users_safe', +SELECT pgclone.table(conn, 'public', 'users', true, 'users_safe', '{"mask": {"notes": {"type": "constant", "value": "REDACTED"}}}'); ``` @@ -342,7 +342,7 @@ SELECT pgclone_table(conn, 'public', 'users', true, 'users_safe', Masks compose with `columns`, `where`, and all other options: ```sql -SELECT pgclone_table(conn, 'hr', 'employees', true, 'employees_dev', +SELECT pgclone.table(conn, 'hr', 'employees', true, 'employees_dev', '{"mask": {"email": "email", "full_name": "name", "ssn": "null", "salary": {"type": "random_int", "min": 40000, "max": 200000}}, "where": "status = ''active''"}'); ``` @@ -373,7 +373,7 @@ SELECT pgclone_table(conn, 'hr', 'employees', true, 'employees_dev', Automatically scan a source schema for columns that look like sensitive data: ```sql -SELECT pgclone_discover_sensitive( +SELECT pgclone.discover_sensitive( 'host=source-server dbname=mydb user=postgres', 'public' ); @@ -395,7 +395,7 @@ Apply masking to an already-cloned local table without needing the source connec ```sql -- Mask an existing table in place -SELECT pgclone_mask_in_place( +SELECT pgclone.mask_in_place( 'public', 'employees', '{"email": "email", "full_name": "name", "ssn": "null"}' ); @@ -419,7 +419,7 @@ Create role-based masking policies that **preserve original data** while present ### Create a masking policy ```sql -SELECT pgclone_create_masking_policy( +SELECT pgclone.create_masking_policy( 'public', 'employees', '{"email": "email", "full_name": "name", "ssn": "null"}', 'data_admin' -- this role can see unmasked data @@ -438,7 +438,7 @@ After this, regular users query `employees_masked` and see anonymized data. The ### Drop a masking policy ```sql -SELECT pgclone_drop_masking_policy('public', 'employees'); +SELECT pgclone.drop_masking_policy('public', 'employees'); ``` This drops the `employees_masked` view and re-grants `SELECT` on the base table to `PUBLIC`. 
@@ -447,13 +447,13 @@ This drops the `employees_masked` view and re-grants `SELECT` on the base table ```sql -- 1. Clone production data -SELECT pgclone_table(conn, 'public', 'employees', true); +SELECT pgclone.table(conn, 'public', 'employees', true); -- 2. Discover sensitive columns -SELECT pgclone_discover_sensitive(conn, 'public'); +SELECT pgclone.discover_sensitive(conn, 'public'); -- 3. Apply dynamic masking policy -SELECT pgclone_create_masking_policy( +SELECT pgclone.create_masking_policy( 'public', 'employees', '{"email": "email", "full_name": "name", "salary": {"type": "random_int", "min": 40000, "max": 200000}, "ssn": "null"}', 'dba_team' @@ -488,7 +488,7 @@ Clone database roles from a source PostgreSQL instance to the local target, incl ### Import all roles ```sql -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=source-server dbname=mydb user=postgres password=secret' ); -- OK: 8 roles created, 2 roles updated, 45 grants applied @@ -497,7 +497,7 @@ SELECT pgclone_clone_roles( ### Import specific roles ```sql -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=source-server dbname=mydb user=postgres password=secret', 'app_user, reporting_user, api_service' ); @@ -507,7 +507,7 @@ SELECT pgclone_clone_roles( ### Import a single role ```sql -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=source-server dbname=mydb user=postgres password=secret', 'app_user' ); @@ -538,13 +538,13 @@ If a role already exists on the target: ```sql -- 1. Clone the database structure and data -SELECT pgclone_database( +SELECT pgclone.database( 'host=prod dbname=myapp user=postgres', true ); -- 2. 
Clone all roles and their permissions -SELECT pgclone_clone_roles( +SELECT pgclone.clone_roles( 'host=prod dbname=myapp user=postgres' ); ``` @@ -563,7 +563,7 @@ Compare row counts between source and target databases to verify clone completen ### Verify a specific schema ```sql -SELECT * FROM pgclone_verify( +SELECT * FROM pgclone.verify( 'host=source-server dbname=prod user=postgres', 'app_schema' ); @@ -581,7 +581,7 @@ SELECT * FROM pgclone_verify( ### Verify all schemas ```sql -SELECT * FROM pgclone_verify( +SELECT * FROM pgclone.verify( 'host=source-server dbname=prod user=postgres' ); ``` @@ -600,7 +600,7 @@ Returns one row per table across all user schemas. - Row counts use `pg_class.reltuples` for fast approximate counts — no full table scans. Run `ANALYZE` on both source and target for accurate results. - Works with regular and partitioned tables. -- Useful after `pgclone_schema()` or `pgclone_database()` to confirm all data was transferred. +- Useful after `pgclone.schema()` or `pgclone.database()` to confirm all data was transferred. --- @@ -609,7 +609,7 @@ Returns one row per table across all user schemas. Generate an audit report listing all sensitive columns in a schema, their masking status, and recommended actions. ```sql -SELECT * FROM pgclone_masking_report('public'); +SELECT * FROM pgclone.masking_report('public'); ``` ``` @@ -636,24 +636,24 @@ SELECT * FROM pgclone_masking_report('public'); ```sql -- 1. Clone production data -SELECT pgclone_database('host=prod dbname=myapp user=postgres', true); +SELECT pgclone.database('host=prod dbname=myapp user=postgres', true); -- 2. Run masking report — find unmasked PII -SELECT * FROM pgclone_masking_report('public') WHERE mask_status = 'UNMASKED'; +SELECT * FROM pgclone.masking_report('public') WHERE mask_status = 'UNMASKED'; -- 3. 
Apply masking policies to unmasked tables -SELECT pgclone_create_masking_policy('public', 'employees', +SELECT pgclone.create_masking_policy('public', 'employees', '{"email": "email", "full_name": "name", "ssn": "null"}', 'dba_team'); -- 4. Re-run report — confirm all sensitive columns are now masked -SELECT * FROM pgclone_masking_report('public'); +SELECT * FROM pgclone.masking_report('public'); ``` ### Notes - Only sensitive columns appear in the report (non-sensitive columns are filtered out). -- The report checks for masked views created by `pgclone_create_masking_policy()`. -- Uses the same ~40 sensitivity patterns as `pgclone_discover_sensitive()`. +- The report checks for masked views created by `pgclone.create_masking_policy()`. +- Uses the same ~40 sensitivity patterns as `pgclone.discover_sensitive()`. --- @@ -677,36 +677,36 @@ SELECT * FROM pgclone_masking_report('public'); | Function | Returns | Description | |----------|---------|-------------| -| `pgclone_version()` | text | Extension version string | -| `pgclone_table(conninfo, schema, table, include_data)` | text | Clone a single table | -| `pgclone_table(conninfo, schema, table, include_data, target_name, options)` | text | Clone table with options | -| `pgclone_table_ex(conninfo, schema, table, data, target, idx, constr, trig)` | text | Clone table with boolean flags | -| `pgclone_schema(conninfo, schema, include_data)` | text | Clone entire schema | -| `pgclone_schema(conninfo, schema, include_data, options)` | text | Clone schema with options | -| `pgclone_schema_ex(conninfo, schema, data, idx, constr, trig)` | text | Clone schema with boolean flags | -| `pgclone_functions(conninfo, schema)` | text | Clone functions only | -| `pgclone_database(conninfo, include_data)` | text | Clone database into current DB | -| `pgclone_database(conninfo, include_data, options)` | text | Clone database with options | -| `pgclone_database_create(conninfo, target_db)` | text | Create new DB and clone | -| 
`pgclone_database_create(conninfo, target_db, include_data, options)` | text | Create new DB and clone with options | -| `pgclone_discover_sensitive(conninfo, schema)` | text | Scan source for sensitive columns, return mask suggestions as JSON | -| `pgclone_mask_in_place(schema, table, mask_json)` | text | Apply masking to existing local table via UPDATE | -| `pgclone_create_masking_policy(schema, table, mask_json, role)` | text | Create dynamic masking view + role-based access | -| `pgclone_drop_masking_policy(schema, table)` | text | Drop masking view + restore base table access | -| `pgclone_clone_roles(conninfo)` | text | Clone all non-system roles with passwords, attributes, memberships, and permissions | -| `pgclone_clone_roles(conninfo, role_names)` | text | Clone specific roles (comma-separated) with passwords, attributes, and permissions | -| `pgclone_verify(conninfo)` | table | Compare row counts for all tables across source and target | -| `pgclone_verify(conninfo, schema)` | table | Compare row counts for tables in a specific schema | -| `pgclone_masking_report(schema)` | table | GDPR/compliance audit: sensitive columns, mask status, recommendations | -| `pgclone_table_async(...)` | int | Async table clone (returns job_id) | -| `pgclone_schema_async(...)` | int | Async schema clone (returns job_id) | -| `pgclone_progress(job_id)` | json | Job progress as JSON | -| `pgclone_jobs()` | json | All jobs as JSON array | -| `pgclone_cancel(job_id)` | bool | Cancel a running job | -| `pgclone_resume(job_id)` | int | Resume failed job (returns new job_id) | -| `pgclone_clear_jobs()` | int | Clear completed/failed jobs | -| `pgclone_progress_detail()` | setof record | All jobs as table-returning function | -| `pgclone_jobs_view` | view | All jobs with progress bar and elapsed time | +| `pgclone.version()` | text | Extension version string | +| `pgclone.table(conninfo, schema, table, include_data)` | text | Clone a single table | +| `pgclone.table(conninfo, 
schema, table, include_data, target_name, options)` | text | Clone table with options | +| `pgclone.table_ex(conninfo, schema, table, data, target, idx, constr, trig)` | text | Clone table with boolean flags | +| `pgclone.schema(conninfo, schema, include_data)` | text | Clone entire schema | +| `pgclone.schema(conninfo, schema, include_data, options)` | text | Clone schema with options | +| `pgclone.schema_ex(conninfo, schema, data, idx, constr, trig)` | text | Clone schema with boolean flags | +| `pgclone.functions(conninfo, schema)` | text | Clone functions only | +| `pgclone.database(conninfo, include_data)` | text | Clone database into current DB | +| `pgclone.database(conninfo, include_data, options)` | text | Clone database with options | +| `pgclone.database_create(conninfo, target_db)` | text | Create new DB and clone | +| `pgclone.database_create(conninfo, target_db, include_data, options)` | text | Create new DB and clone with options | +| `pgclone.discover_sensitive(conninfo, schema)` | text | Scan source for sensitive columns, return mask suggestions as JSON | +| `pgclone.mask_in_place(schema, table, mask_json)` | text | Apply masking to existing local table via UPDATE | +| `pgclone.create_masking_policy(schema, table, mask_json, role)` | text | Create dynamic masking view + role-based access | +| `pgclone.drop_masking_policy(schema, table)` | text | Drop masking view + restore base table access | +| `pgclone.clone_roles(conninfo)` | text | Clone all non-system roles with passwords, attributes, memberships, and permissions | +| `pgclone.clone_roles(conninfo, role_names)` | text | Clone specific roles (comma-separated) with passwords, attributes, and permissions | +| `pgclone.verify(conninfo)` | table | Compare row counts for all tables across source and target | +| `pgclone.verify(conninfo, schema)` | table | Compare row counts for tables in a specific schema | +| `pgclone.masking_report(schema)` | table | GDPR/compliance audit: sensitive columns, mask 
status, recommendations | +| `pgclone.table_async(...)` | int | Async table clone (returns job_id) | +| `pgclone.schema_async(...)` | int | Async schema clone (returns job_id) | +| `pgclone.progress(job_id)` | json | Job progress as JSON | +| `pgclone.jobs()` | json | All jobs as JSON array | +| `pgclone.cancel(job_id)` | text | Cancel a running job | +| `pgclone.resume(job_id)` | int | Resume failed job (returns new job_id) | +| `pgclone.clear_jobs()` | int | Clear completed/failed jobs | +| `pgclone.progress_detail()` | setof record | All jobs as table-returning function | +| `pgclone.jobs_view` | view | All jobs with progress bar and elapsed time | ## Current Limitations diff --git a/pgclone.control b/pgclone.control index 3515b38..0d6fc74 100644 --- a/pgclone.control +++ b/pgclone.control @@ -1,6 +1,6 @@ # pgclone extension comment = 'Clone PostgreSQL databases, schemas, tables, roles and permissions with selective columns, data filtering, data masking/anonymization, async, parallel cloning, materialized views, resume, and conflict resolution' -default_version = '3.6.0' +default_version = '4.0.0' module_pathname = '$libdir/pgclone' relocatable = false superuser = true diff --git a/sql/pgclone--4.0.0.sql b/sql/pgclone--4.0.0.sql new file mode 100644 index 0000000..8fd9484 --- /dev/null +++ b/sql/pgclone--4.0.0.sql @@ -0,0 +1,141 @@ +/* pgclone--4.0.0.sql */ +\echo Use "CREATE EXTENSION pgclone" to load this file. \quit + +-- v4.0.0: All functions now live under the 'pgclone' schema. +-- Usage: SELECT pgclone.table(...), pgclone.schema(...), etc.
+CREATE SCHEMA IF NOT EXISTS pgclone; + +-- SYNCHRONOUS +CREATE FUNCTION pgclone.table(source_conninfo TEXT, schema_name TEXT, table_name TEXT, include_data BOOLEAN DEFAULT true) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_table' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.table(source_conninfo TEXT, schema_name TEXT, table_name TEXT, include_data BOOLEAN, target_table_name TEXT) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_table' LANGUAGE C VOLATILE; +CREATE FUNCTION pgclone.table(source_conninfo TEXT, schema_name TEXT, table_name TEXT, include_data BOOLEAN, target_table_name TEXT, options TEXT) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_table' LANGUAGE C VOLATILE; +COMMENT ON FUNCTION pgclone.table(TEXT, TEXT, TEXT, BOOLEAN, TEXT, TEXT) IS 'Clone table with JSON options: {"columns":["col1","col2"], "where":"status=''active''", "indexes":false, "constraints":false, "triggers":false, "mask":{"email":"email","name":"name","phone":"phone","col":{"type":"partial","prefix":2,"suffix":3},"col2":"hash","col3":"null","col4":{"type":"random_int","min":0,"max":100},"col5":{"type":"constant","value":"REDACTED"}}}'; +CREATE FUNCTION pgclone.table_ex(source_conninfo TEXT, schema_name TEXT, table_name TEXT, include_data BOOLEAN, target_table_name TEXT, include_indexes BOOLEAN DEFAULT true, include_constraints BOOLEAN DEFAULT true, include_triggers BOOLEAN DEFAULT true) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_table_ex' LANGUAGE C VOLATILE; +CREATE FUNCTION pgclone.schema(source_conninfo TEXT, schema_name TEXT, include_data BOOLEAN DEFAULT true) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_schema' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.schema(source_conninfo TEXT, schema_name TEXT, include_data BOOLEAN, options TEXT) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_schema' LANGUAGE C VOLATILE; +CREATE FUNCTION pgclone.schema_ex(source_conninfo TEXT, schema_name TEXT, include_data BOOLEAN, include_indexes BOOLEAN DEFAULT true, include_constraints BOOLEAN 
DEFAULT true, include_triggers BOOLEAN DEFAULT true) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_schema_ex' LANGUAGE C VOLATILE; +CREATE FUNCTION pgclone.functions(source_conninfo TEXT, schema_name TEXT) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_functions' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.database(source_conninfo TEXT, include_data BOOLEAN DEFAULT true) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_database' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.database(source_conninfo TEXT, include_data BOOLEAN, options TEXT) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_database' LANGUAGE C VOLATILE; + +-- v2.0.1: Create target database and clone into it +CREATE FUNCTION pgclone.database_create(source_conninfo TEXT, target_dbname TEXT, include_data BOOLEAN DEFAULT true) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_database_create' LANGUAGE C VOLATILE; +CREATE FUNCTION pgclone.database_create(source_conninfo TEXT, target_dbname TEXT, include_data BOOLEAN, options TEXT) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_database_create' LANGUAGE C VOLATILE; +COMMENT ON FUNCTION pgclone.database_create(TEXT, TEXT, BOOLEAN) IS 'Create target database if not exists, then clone all schemas/tables/functions from source. 
Run from postgres DB.'; + +-- ASYNC (require shared_preload_libraries = 'pgclone') +CREATE FUNCTION pgclone.table_async(source_conninfo TEXT, schema_name TEXT, table_name TEXT, include_data BOOLEAN DEFAULT true, target_table_name TEXT DEFAULT NULL, options TEXT DEFAULT NULL) RETURNS INTEGER AS 'MODULE_PATHNAME', 'pgclone_table_async' LANGUAGE C VOLATILE; +CREATE FUNCTION pgclone.schema_async(source_conninfo TEXT, schema_name TEXT, include_data BOOLEAN DEFAULT true, options TEXT DEFAULT NULL) RETURNS INTEGER AS 'MODULE_PATHNAME', 'pgclone_schema_async' LANGUAGE C VOLATILE; + +-- PROGRESS & JOB MANAGEMENT +CREATE FUNCTION pgclone.progress(job_id INTEGER) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_progress' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.cancel(job_id INTEGER) RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_cancel' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.resume(job_id INTEGER) RETURNS INTEGER AS 'MODULE_PATHNAME', 'pgclone_resume' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.jobs() RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_jobs' LANGUAGE C VOLATILE STRICT; +CREATE FUNCTION pgclone.clear_jobs() RETURNS INTEGER AS 'MODULE_PATHNAME', 'pgclone_clear_jobs' LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.clear_jobs() IS 'Clear completed/failed/cancelled job slots from shared memory'; + +-- v2.1.0+v2.1.1+v2.1.2: Progress Tracking View with progress bar, elapsed time, ETA +CREATE FUNCTION pgclone.progress_detail() +RETURNS TABLE ( + job_id INTEGER, + status TEXT, + op_type TEXT, + schema_name TEXT, + table_name TEXT, + current_phase TEXT, + current_table TEXT, + tables_total BIGINT, + tables_completed BIGINT, + rows_copied BIGINT, + bytes_copied BIGINT, + elapsed_ms BIGINT, + start_time TIMESTAMPTZ, + end_time TIMESTAMPTZ, + error_message TEXT, + pct_complete DOUBLE PRECISION, + progress_bar TEXT, + elapsed_time TEXT +) AS 'MODULE_PATHNAME', 'pgclone_progress_view' +LANGUAGE C VOLATILE STRICT; + +COMMENT ON FUNCTION 
pgclone.progress_detail() IS 'Returns tabular progress with visual progress bar and elapsed time for all clone jobs'; + +-- VIEW: convenient wrapper +CREATE VIEW pgclone.jobs_view AS + SELECT * FROM pgclone.progress_detail(); + +COMMENT ON VIEW pgclone.jobs_view IS 'Live progress tracking view with progress bar and elapsed time for all pgclone async clone jobs'; + +-- v3.1.0: Auto-discovery of sensitive data +CREATE FUNCTION pgclone.discover_sensitive(source_conninfo TEXT, schema_name TEXT) +RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_discover_sensitive' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.discover_sensitive(TEXT, TEXT) IS 'Scan source schema for columns matching sensitive data patterns (email, name, phone, ssn, salary, etc.) and return suggested mask rules as JSON'; + +-- v3.2.0: Static data masking on local tables +CREATE FUNCTION pgclone.mask_in_place(schema_name TEXT, table_name TEXT, mask_json TEXT) +RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_mask_in_place' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.mask_in_place(TEXT, TEXT, TEXT) IS 'Apply data masking to an existing local table via UPDATE. 
mask_json uses same format as clone mask option: {"email": "email", "name": "name", "ssn": "null"}'; + +-- v3.3.0: Dynamic data masking via views and role-based access +CREATE FUNCTION pgclone.create_masking_policy(schema_name TEXT, table_name TEXT, mask_json TEXT, privileged_role TEXT) +RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_create_masking_policy' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.create_masking_policy(TEXT, TEXT, TEXT, TEXT) IS 'Create a dynamic masking policy: creates a masked view, revokes base table access from PUBLIC, grants view to PUBLIC, grants base table to privileged role'; + +CREATE FUNCTION pgclone.drop_masking_policy(schema_name TEXT, table_name TEXT) +RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_drop_masking_policy' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.drop_masking_policy(TEXT, TEXT) IS 'Remove a dynamic masking policy: drops the masked view and restores base table access to PUBLIC'; + +-- v3.4.0: Clone roles with permissions and passwords +CREATE FUNCTION pgclone.clone_roles(source_conninfo TEXT) +RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_clone_roles' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.clone_roles(TEXT) IS 'Clone all non-system roles from source with encrypted passwords, attributes, memberships, and all permissions. Requires superuser on both source and target.'; + +CREATE FUNCTION pgclone.clone_roles(source_conninfo TEXT, role_names TEXT) +RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_clone_roles' +LANGUAGE C VOLATILE; +COMMENT ON FUNCTION pgclone.clone_roles(TEXT, TEXT) IS 'Clone specific roles (comma-separated) from source with encrypted passwords, attributes, memberships, and permissions. 
If role exists on target, syncs password and attributes without dropping.'; + +-- v3.5.0: Clone verification — compare row counts +CREATE FUNCTION pgclone.verify(source_conninfo TEXT, schema_name TEXT) +RETURNS TABLE ( + schema_name TEXT, + table_name TEXT, + source_rows BIGINT, + target_rows BIGINT, + match TEXT +) AS 'MODULE_PATHNAME', 'pgclone_verify' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.verify(TEXT, TEXT) IS 'Compare row counts between source and local target for all tables in a schema. Returns side-by-side comparison with match status.'; + +CREATE FUNCTION pgclone.verify(source_conninfo TEXT) +RETURNS TABLE ( + schema_name TEXT, + table_name TEXT, + source_rows BIGINT, + target_rows BIGINT, + match TEXT +) AS 'MODULE_PATHNAME', 'pgclone_verify' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.verify(TEXT) IS 'Compare row counts between source and local target for all user tables across all schemas. Returns side-by-side comparison with match status.'; + +-- v3.6.0: GDPR/Compliance masking report +CREATE FUNCTION pgclone.masking_report(schema_name TEXT) +RETURNS TABLE ( + schema_name TEXT, + table_name TEXT, + column_name TEXT, + sensitivity TEXT, + mask_status TEXT, + recommendation TEXT +) AS 'MODULE_PATHNAME', 'pgclone_masking_report' +LANGUAGE C VOLATILE STRICT; +COMMENT ON FUNCTION pgclone.masking_report(TEXT) IS 'Generate GDPR/compliance audit report: lists sensitive columns, their masking status, and recommendations. 
Checks for masked views.'; + +-- VERSION +CREATE FUNCTION pgclone.version() RETURNS TEXT AS 'MODULE_PATHNAME', 'pgclone_version' LANGUAGE C IMMUTABLE STRICT; diff --git a/src/pgclone.c b/src/pgclone.c index 37ef102..6dc536c 100644 --- a/src/pgclone.c +++ b/src/pgclone.c @@ -2451,18 +2451,18 @@ pgclone_database_create(PG_FUNCTION_ARGS) if (options_json != NULL) { - /* 3-arg: pgclone_database(conninfo, include_data, options) */ + /* 3-arg: pgclone.database(conninfo, include_data, options) */ appendStringInfo(&buf, - "SELECT pgclone_database(%s, %s, %s)", + "SELECT pgclone.database(%s, %s, %s)", quote_literal_cstr(source_conninfo), include_data ? "true" : "false", quote_literal_cstr(options_json)); } else { - /* 2-arg: pgclone_database(conninfo, include_data) */ + /* 2-arg: pgclone.database(conninfo, include_data) */ appendStringInfo(&buf, - "SELECT pgclone_database(%s, %s)", + "SELECT pgclone.database(%s, %s)", quote_literal_cstr(source_conninfo), include_data ? "true" : "false"); } @@ -3981,7 +3981,7 @@ PG_FUNCTION_INFO_V1(pgclone_version); Datum pgclone_version(PG_FUNCTION_ARGS) { - PG_RETURN_TEXT_P(cstring_to_text("pgclone 3.6.0")); + PG_RETURN_TEXT_P(cstring_to_text("pgclone 4.0.0")); } /* =============================================================== diff --git a/test/pgclone_test.sql b/test/pgclone_test.sql index 4477f06..3ae9092 100644 --- a/test/pgclone_test.sql +++ b/test/pgclone_test.sql @@ -22,19 +22,19 @@ SELECT lives_ok( ); SELECT matches( - pgclone_version(), + pgclone.version(), '^pgclone ', - 'pgclone_version() returns version string' + 'pgclone.version() returns version string' ); -- ============================================================ -- TEST GROUP 2: Clone single table (structure + data) -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true)', + format('SELECT pgclone.table(%L, %L, %L, true)', current_setting('app.source_conninfo'), 'public', 'simple_test'), - 
'pgclone_table clones simple_test with data' + 'pgclone.table clones simple_test with data' ); SELECT has_table('public', 'simple_test', 'simple_test table exists locally'); @@ -50,10 +50,10 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L)', current_setting('app.source_conninfo'), 'public', 'simple_test', 'simple_test_copy'), - 'pgclone_table clones with different target name' + 'pgclone.table clones with different target name' ); SELECT has_table('public', 'simple_test_copy', 'simple_test_copy table exists'); @@ -69,10 +69,10 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, false, %L)', + format('SELECT pgclone.table(%L, %L, %L, false, %L)', current_setting('app.source_conninfo'), 'public', 'simple_test', 'simple_test_empty'), - 'pgclone_table clones structure only' + 'pgclone.table clones structure only' ); SELECT has_table('public', 'simple_test_empty', 'simple_test_empty table exists'); @@ -88,10 +88,10 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_schema(%L, %L, true)', + format('SELECT pgclone.schema(%L, %L, true)', current_setting('app.source_conninfo'), 'test_schema'), - 'pgclone_schema clones test_schema' + 'pgclone.schema clones test_schema' ); SELECT has_schema('test_schema', 'test_schema exists'); @@ -167,11 +167,11 @@ SELECT has_function( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'customers', 'customers_lite', '{"columns": ["id", "name", "email"]}'), - 'pgclone_table with selective columns' + 'pgclone.table 
with selective columns' ); SELECT has_table('test_schema', 'customers_lite', 'customers_lite created'); @@ -188,11 +188,11 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'customers', 'active_only', '{"where": "status = ''active''"}'), - 'pgclone_table with WHERE filter' + 'pgclone.table with WHERE filter' ); SELECT results_eq( @@ -207,7 +207,7 @@ SELECT results_eq( -- Test: semicolon in WHERE clause must be rejected SELECT throws_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'customers', 'inject_test1', '{"where": "1=1; DROP TABLE customers; --"}'), @@ -218,7 +218,7 @@ SELECT throws_ok( -- Test: DROP keyword in WHERE clause must be rejected SELECT throws_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'customers', 'inject_test2', '{"where": "1=1 OR DROP TABLE customers"}'), @@ -229,7 +229,7 @@ SELECT throws_ok( -- Test: INSERT keyword in WHERE clause must be rejected SELECT throws_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'customers', 'inject_test3', '{"where": "1=1 OR INSERT INTO customers VALUES(999)"}'), @@ -240,7 +240,7 @@ SELECT throws_ok( -- Test: valid WHERE with column named 'created_at' must NOT trigger false positive on CREATE SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'customers', 
'no_false_positive', '{"where": "created_at IS NOT NULL"}'), @@ -252,11 +252,11 @@ SELECT lives_ok( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_masked_email', '{"mask": {"email": "email"}}'), - 'pgclone_table with email mask' + 'pgclone.table with email mask' ); SELECT has_table('test_schema', 'employees_masked_email', 'masked email table created'); @@ -288,11 +288,11 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_masked_name', '{"mask": {"full_name": "name"}}'), - 'pgclone_table with name mask' + 'pgclone.table with name mask' ); -- All names should be XXXX @@ -308,11 +308,11 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_masked_null', '{"mask": {"ssn": "null"}}'), - 'pgclone_table with null mask on ssn' + 'pgclone.table with null mask on ssn' ); SELECT results_eq( @@ -327,11 +327,11 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_masked_hash', '{"mask": {"email": "hash"}}'), - 'pgclone_table with hash mask on email' + 'pgclone.table with hash mask on email' ); -- Hashed emails should be 
32-char hex strings (md5) @@ -347,11 +347,11 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_masked_const', '{"mask": {"notes": {"type": "constant", "value": "REDACTED"}}}'), - 'pgclone_table with constant mask on notes' + 'pgclone.table with constant mask on notes' ); -- Non-null notes should be REDACTED (NULL notes stay NULL is acceptable too) @@ -367,11 +367,11 @@ SELECT results_eq( -- ============================================================ SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_masked_combo', '{"mask": {"email": "email", "full_name": "name", "ssn": "null"}, "where": "salary > 60000"}'), - 'pgclone_table with combined masks and WHERE filter' + 'pgclone.table with combined masks and WHERE filter' ); -- WHERE salary > 60000 should give 4 rows (Alice=95k, Bob=82k, Charlie=67k, Diana=120k) @@ -395,44 +395,44 @@ SELECT results_eq( -- Discover sensitive columns in test_schema (from source) SELECT lives_ok( - format('SELECT pgclone_discover_sensitive(%L, %L)', + format('SELECT pgclone.discover_sensitive(%L, %L)', current_setting('app.source_conninfo'), 'test_schema'), - 'pgclone_discover_sensitive runs without error' + 'pgclone.discover_sensitive runs without error' ); -- Result should contain JSON with employees table detected columns -- The employees table has: full_name, email, phone, salary, ssn SELECT ok( - (SELECT pgclone_discover_sensitive( + (SELECT pgclone.discover_sensitive( current_setting('app.source_conninfo'), 'test_schema')::text LIKE '%email%'), 'discover detects email column' ); SELECT ok( - (SELECT pgclone_discover_sensitive( + 
(SELECT pgclone.discover_sensitive( current_setting('app.source_conninfo'), 'test_schema')::text LIKE '%full_name%'), 'discover detects full_name column' ); SELECT ok( - (SELECT pgclone_discover_sensitive( + (SELECT pgclone.discover_sensitive( current_setting('app.source_conninfo'), 'test_schema')::text LIKE '%phone%'), 'discover detects phone column' ); SELECT ok( - (SELECT pgclone_discover_sensitive( + (SELECT pgclone.discover_sensitive( current_setting('app.source_conninfo'), 'test_schema')::text LIKE '%salary%'), 'discover detects salary column' ); SELECT ok( - (SELECT pgclone_discover_sensitive( + (SELECT pgclone.discover_sensitive( current_setting('app.source_conninfo'), 'test_schema')::text LIKE '%ssn%'), 'discover detects ssn column' @@ -444,7 +444,7 @@ SELECT ok( -- First: clone employees table without masking SELECT lives_ok( - format('SELECT pgclone_table(%L, %L, %L, true, %L)', + format('SELECT pgclone.table(%L, %L, %L, true, %L)', current_setting('app.source_conninfo'), 'test_schema', 'employees', 'employees_inplace'), 'clone employees table for in-place masking' @@ -460,10 +460,10 @@ SELECT results_eq( -- Apply in-place masking SELECT lives_ok( - $$SELECT pgclone_mask_in_place( + $$SELECT pgclone.mask_in_place( 'test_schema', 'employees_inplace', '{"email": "email", "full_name": "name", "ssn": "null"}')$$, - 'pgclone_mask_in_place runs without error' + 'pgclone.mask_in_place runs without error' ); -- Verify emails are masked diff --git a/test/run_tests.sh b/test/run_tests.sh index 06af4e1..3aa3f5c 100755 --- a/test/run_tests.sh +++ b/test/run_tests.sh @@ -34,7 +34,7 @@ echo "Installing extensions..." 
psql -U postgres -d target_db <&1 diff --git a/test/test_async.sh b/test/test_async.sh index dc0acf2..ebaf761 100755 --- a/test/test_async.sh +++ b/test/test_async.sh @@ -1,9 +1,9 @@ #!/bin/bash # ============================================================ # pgclone async functions test -# Tests: pgclone_table_async, pgclone_schema_async, -# pgclone_progress, pgclone_cancel, pgclone_clear_jobs, -# pgclone_jobs_view +# Tests: pgclone.table_async, pgclone.schema_async, +# pgclone.progress, pgclone.cancel, pgclone.clear_jobs, +# pgclone.jobs_view # # Requires shared_preload_libraries = 'pgclone' # ============================================================ @@ -34,13 +34,13 @@ run_test() { # Ensure pgclone is loaded in shared_preload_libraries # The Dockerfile/run_tests.sh should handle this, but verify echo "Checking shared memory initialization..." -SHM_CHECK=$(psql -U postgres -d target_db -tAc "SELECT pgclone_version();" 2>&1) +SHM_CHECK=$(psql -U postgres -d target_db -tAc "SELECT pgclone.version();" 2>&1) echo " pgclone version: $SHM_CHECK" # ---- Clean slate ---- echo "" echo "Preparing clean environment..." 
-psql -U postgres -d target_db -c "SELECT pgclone_clear_jobs();" 2>/dev/null || true +psql -U postgres -d target_db -c "SELECT pgclone.clear_jobs();" 2>/dev/null || true # Drop tables/schemas created by previous sync tests (pgTAP ROLLBACK # doesn't undo DDL done via loopback libpq connections) @@ -56,19 +56,19 @@ SQL # fresh schema 'async_test_schema' to avoid conflicts with sync tests # ============================================================ -# TEST 1: pgclone_table_async — basic table clone +# TEST 1: pgclone.table_async — basic table clone # ============================================================ echo "" -echo "---- TEST 1: pgclone_table_async basic ----" +echo "---- TEST 1: pgclone.table_async basic ----" JOB_ID=$(psql -U postgres -d target_db -tAc " - SELECT pgclone_table_async( + SELECT pgclone.table_async( '${SOURCE_CONNINFO}', 'public', 'simple_test', true ); ") -run_test "pgclone_table_async returns job_id" "[ -n '$JOB_ID' ] && [ '$JOB_ID' -gt 0 ]" +run_test "pgclone.table_async returns job_id" "[ -n '$JOB_ID' ] && [ '$JOB_ID' -gt 0 ]" echo " Job ID: $JOB_ID" @@ -76,7 +76,7 @@ echo " Job ID: $JOB_ID" echo " Waiting for job to complete..." 
for i in $(seq 1 30); do STATUS=$(psql -U postgres -d target_db -tAc " - SELECT status FROM pgclone_jobs_view WHERE job_id = $JOB_ID; + SELECT status FROM pgclone.jobs_view WHERE job_id = $JOB_ID; " 2>/dev/null || echo "unknown") STATUS=$(echo "$STATUS" | tr -d '[:space:]') if [ "$STATUS" = "completed" ] || [ "$STATUS" = "failed" ]; then @@ -93,13 +93,13 @@ ROW_COUNT=$(echo "$ROW_COUNT" | tr -d '[:space:]') run_test "simple_test has 5 rows (async)" "[ '$ROW_COUNT' = '5' ]" # ============================================================ -# TEST 2: pgclone_table_async with different target name +# TEST 2: pgclone.table_async with different target name # ============================================================ echo "" -echo "---- TEST 2: pgclone_table_async with target name ----" +echo "---- TEST 2: pgclone.table_async with target name ----" JOB_ID2=$(psql -U postgres -d target_db -tAc " - SELECT pgclone_table_async( + SELECT pgclone.table_async( '${SOURCE_CONNINFO}', 'public', 'simple_test', true, 'async_renamed' @@ -110,7 +110,7 @@ echo " Job ID: $JOB_ID2" for i in $(seq 1 30); do STATUS2=$(psql -U postgres -d target_db -tAc " - SELECT status FROM pgclone_jobs_view WHERE job_id = $JOB_ID2; + SELECT status FROM pgclone.jobs_view WHERE job_id = $JOB_ID2; " 2>/dev/null || echo "unknown") STATUS2=$(echo "$STATUS2" | tr -d '[:space:]') if [ "$STATUS2" = "completed" ] || [ "$STATUS2" = "failed" ]; then @@ -132,16 +132,16 @@ ROW_COUNT2=$(echo "$ROW_COUNT2" | tr -d '[:space:]') run_test "async_renamed has 5 rows" "[ '$ROW_COUNT2' = '5' ]" # ============================================================ -# TEST 3: pgclone_schema_async — sequential mode +# TEST 3: pgclone.schema_async — sequential mode # ============================================================ echo "" -echo "---- TEST 3: pgclone_schema_async (sequential) ----" +echo "---- TEST 3: pgclone.schema_async (sequential) ----" # Drop test_schema so async clone has a clean target psql -U postgres -d target_db -c 
"DROP SCHEMA IF EXISTS test_schema CASCADE;" 2>/dev/null || true JOB_ID3=$(psql -U postgres -d target_db -tAc " - SELECT pgclone_schema_async( + SELECT pgclone.schema_async( '${SOURCE_CONNINFO}', 'test_schema', true ); @@ -151,7 +151,7 @@ echo " Job ID: $JOB_ID3" for i in $(seq 1 60); do STATUS3=$(psql -U postgres -d target_db -tAc " - SELECT status FROM pgclone_jobs_view WHERE job_id = $JOB_ID3; + SELECT status FROM pgclone.jobs_view WHERE job_id = $JOB_ID3; " 2>/dev/null || echo "unknown") STATUS3=$(echo "$STATUS3" | tr -d '[:space:]') if [ "$STATUS3" = "completed" ] || [ "$STATUS3" = "failed" ]; then @@ -172,58 +172,58 @@ ORD_COUNT=$(echo "$ORD_COUNT" | tr -d '[:space:]') run_test "test_schema.orders has 10 rows (async)" "[ '$ORD_COUNT' = '10' ]" # ============================================================ -# TEST 4: pgclone_progress — check progress JSON +# TEST 4: pgclone.progress — check progress JSON # ============================================================ echo "" -echo "---- TEST 4: pgclone_progress ----" +echo "---- TEST 4: pgclone.progress ----" -PROGRESS=$(psql -U postgres -d target_db -tAc "SELECT pgclone_progress($JOB_ID);" 2>/dev/null || echo "null") -run_test "pgclone_progress returns JSON" "echo '$PROGRESS' | grep -q 'job_id'" +PROGRESS=$(psql -U postgres -d target_db -tAc "SELECT pgclone.progress($JOB_ID);" 2>/dev/null || echo "null") +run_test "pgclone.progress returns JSON" "echo '$PROGRESS' | grep -q 'job_id'" # ============================================================ -# TEST 5: pgclone_jobs — list all jobs +# TEST 5: pgclone.jobs — list all jobs # ============================================================ echo "" -echo "---- TEST 5: pgclone_jobs ----" +echo "---- TEST 5: pgclone.jobs ----" -JOBS_JSON=$(psql -U postgres -d target_db -tAc "SELECT pgclone_jobs();" 2>/dev/null || echo "[]") -run_test "pgclone_jobs returns JSON array" "echo '$JOBS_JSON' | grep -q 'job_id'" +JOBS_JSON=$(psql -U postgres -d target_db -tAc "SELECT 
pgclone.jobs();" 2>/dev/null || echo "[]") +run_test "pgclone.jobs returns JSON array" "echo '$JOBS_JSON' | grep -q 'job_id'" # ============================================================ -# TEST 6: pgclone_jobs_view — progress view +# TEST 6: pgclone.jobs_view — progress view # ============================================================ echo "" -echo "---- TEST 6: pgclone_jobs_view ----" +echo "---- TEST 6: pgclone.jobs_view ----" -VIEW_COUNT=$(psql -U postgres -d target_db -tAc "SELECT count(*) FROM pgclone_jobs_view;" 2>/dev/null || echo "0") +VIEW_COUNT=$(psql -U postgres -d target_db -tAc "SELECT count(*) FROM pgclone.jobs_view;" 2>/dev/null || echo "0") VIEW_COUNT=$(echo "$VIEW_COUNT" | tr -d '[:space:]') -run_test "pgclone_jobs_view has rows" "[ '$VIEW_COUNT' -ge 1 ]" +run_test "pgclone.jobs_view has rows" "[ '$VIEW_COUNT' -ge 1 ]" # Check progress bar column BAR_CHECK=$(psql -U postgres -d target_db -tAc " - SELECT progress_bar FROM pgclone_jobs_view WHERE job_id = $JOB_ID LIMIT 1; + SELECT progress_bar FROM pgclone.jobs_view WHERE job_id = $JOB_ID LIMIT 1; " 2>/dev/null || echo "") run_test "progress_bar column populated" "[ -n '$BAR_CHECK' ]" # Check elapsed_time column TIME_CHECK=$(psql -U postgres -d target_db -tAc " - SELECT elapsed_time FROM pgclone_jobs_view WHERE job_id = $JOB_ID LIMIT 1; + SELECT elapsed_time FROM pgclone.jobs_view WHERE job_id = $JOB_ID LIMIT 1; " 2>/dev/null || echo "") TIME_CHECK=$(echo "$TIME_CHECK" | tr -d '[:space:]') run_test "elapsed_time column populated" "[ -n '$TIME_CHECK' ]" # ============================================================ -# TEST 7: pgclone_clear_jobs +# TEST 7: pgclone.clear_jobs # ============================================================ echo "" -echo "---- TEST 7: pgclone_clear_jobs ----" +echo "---- TEST 7: pgclone.clear_jobs ----" -CLEARED=$(psql -U postgres -d target_db -tAc "SELECT pgclone_clear_jobs();" 2>/dev/null || echo "0") +CLEARED=$(psql -U postgres -d target_db -tAc "SELECT 
pgclone.clear_jobs();" 2>/dev/null || echo "0") CLEARED=$(echo "$CLEARED" | tr -d '[:space:]') -run_test "pgclone_clear_jobs clears completed jobs" "[ '$CLEARED' -ge 1 ]" +run_test "pgclone.clear_jobs clears completed jobs" "[ '$CLEARED' -ge 1 ]" # Verify jobs are cleared -REMAINING=$(psql -U postgres -d target_db -tAc "SELECT count(*) FROM pgclone_jobs_view;" 2>/dev/null || echo "999") +REMAINING=$(psql -U postgres -d target_db -tAc "SELECT count(*) FROM pgclone.jobs_view;" 2>/dev/null || echo "999") REMAINING=$(echo "$REMAINING" | tr -d '[:space:]') run_test "All completed jobs cleared from view" "[ '$REMAINING' = '0' ]" @@ -237,7 +237,7 @@ echo "---- TEST 8: Worker Pool (parallel schema clone) ----" psql -U postgres -d target_db -c "DROP SCHEMA IF EXISTS test_schema CASCADE;" 2>/dev/null || true JOB_ID8=$(psql -U postgres -d target_db -tAc " - SELECT pgclone_schema_async( + SELECT pgclone.schema_async( '${SOURCE_CONNINFO}', 'test_schema', true, '{\"parallel\": 2}' @@ -251,7 +251,7 @@ run_test "Pool parent job_id returned" "[ -n '$JOB_ID8' ] && [ '$JOB_ID8' -gt 0 # Wait for completion (pool workers + parent finalization) for i in $(seq 1 60); do STATUS8=$(psql -U postgres -d target_db -tAc " - SELECT status FROM pgclone_jobs_view WHERE job_id = $JOB_ID8; + SELECT status FROM pgclone.jobs_view WHERE job_id = $JOB_ID8; " 2>/dev/null || echo "unknown") STATUS8=$(echo "$STATUS8" | tr -d '[:space:]') if [ "$STATUS8" = "completed" ] || [ "$STATUS8" = "failed" ]; then @@ -273,14 +273,14 @@ run_test "Pool: test_schema.orders has 10 rows" "[ '$POOL_ORD' = '10' ]" # Verify pool workers show as separate jobs (type = table, parallel_workers = -1 sentinel) POOL_WORKERS=$(psql -U postgres -d target_db -tAc " - SELECT count(*) FROM pgclone_jobs_view + SELECT count(*) FROM pgclone.jobs_view WHERE job_id > $JOB_ID8 AND op_type = 'table'; " 2>/dev/null || echo "0") POOL_WORKERS=$(echo "$POOL_WORKERS" | tr -d '[:space:]') run_test "Pool workers visible in jobs_view" "[ 
'$POOL_WORKERS' -ge 1 ]" # Clean up for final test -psql -U postgres -d target_db -c "SELECT pgclone_clear_jobs();" 2>/dev/null || true +psql -U postgres -d target_db -c "SELECT pgclone.clear_jobs();" 2>/dev/null || true # ============================================================ # RESULTS diff --git a/test/test_database_create.sh b/test/test_database_create.sh index 4935918..901775f 100755 --- a/test/test_database_create.sh +++ b/test/test_database_create.sh @@ -1,6 +1,6 @@ #!/bin/bash # ============================================================ -# pgclone_database_create test +# pgclone.database_create test # Tests creating a new database and cloning into it # Must run outside pgTAP transaction (CREATE DATABASE) # ============================================================ @@ -8,7 +8,7 @@ set -e echo "============================================" -echo "Testing pgclone_database_create" +echo "Testing pgclone.database_create" echo "============================================" SOURCE_CONNINFO="host=source-db dbname=source_db user=postgres password=testpass" @@ -25,16 +25,16 @@ psql -U postgres -d postgres -c "DROP DATABASE IF EXISTS clone_test_db;" 2>/dev/ # ---- TEST 1: Create new database and clone ---- echo "" -echo "TEST 1: pgclone_database_create creates DB and clones" +echo "TEST 1: pgclone.database_create creates DB and clones" psql -U postgres -d postgres -v ON_ERROR_STOP=1 </dev/ echo "" echo "============================================" -echo "ALL pgclone_database_create TESTS PASSED" +echo "ALL pgclone.database_create TESTS PASSED" echo "============================================" diff --git a/test/test_loopback.sh b/test/test_loopback.sh index 0916b4e..73c0598 100755 --- a/test/test_loopback.sh +++ b/test/test_loopback.sh @@ -36,8 +36,8 @@ echo "============================================" # ---- Clone roles ---- echo "" echo "---- Clone roles ----" -RESULT=$(pg "SELECT pgclone_clone_roles('${SOURCE_CONNINFO}');" || echo "ERROR") -run_test 
"pgclone_clone_roles runs without error" "[ '$RESULT' != 'ERROR' ]" +RESULT=$(pg "SELECT pgclone.clone_roles('${SOURCE_CONNINFO}');" || echo "ERROR") +run_test "pgclone.clone_roles runs without error" "[ '$RESULT' != 'ERROR' ]" R1=$(pg "SELECT 1 FROM pg_roles WHERE rolname = 'test_reader';" || echo "0") run_test "test_reader role exists" "[ '$R1' = '1' ]" @@ -57,41 +57,41 @@ run_test "test_admin has CREATEDB" "[ '$R5' = 't' ]" # ---- Clone verification ---- echo "" echo "---- Clone verification ----" -VC=$(pg "SELECT count(*) FROM pgclone_verify('${SOURCE_CONNINFO}', 'test_schema');" || echo "0") -run_test "pgclone_verify returns rows" "[ '$VC' -ge 1 ]" +VC=$(pg "SELECT count(*) FROM pgclone.verify('${SOURCE_CONNINFO}', 'test_schema');" || echo "0") +run_test "pgclone.verify returns rows" "[ '$VC' -ge 1 ]" -MATCH=$(pg "SELECT match FROM pgclone_verify('${SOURCE_CONNINFO}', 'test_schema') WHERE table_name = 'customers' LIMIT 1;" || echo "MISSING") +MATCH=$(pg "SELECT match FROM pgclone.verify('${SOURCE_CONNINFO}', 'test_schema') WHERE table_name = 'customers' LIMIT 1;" || echo "MISSING") run_test "customers table shows match" "echo '$MATCH' | grep -qv 'missing'" -VC2=$(pg "SELECT count(*) FROM pgclone_verify('${SOURCE_CONNINFO}');" || echo "0") -run_test "pgclone_verify all-schemas works" "[ '$VC2' -ge 1 ]" +VC2=$(pg "SELECT count(*) FROM pgclone.verify('${SOURCE_CONNINFO}');" || echo "0") +run_test "pgclone.verify all-schemas works" "[ '$VC2' -ge 1 ]" -MATCH2=$(pg "SELECT match FROM pgclone_verify('${SOURCE_CONNINFO}', 'public') WHERE table_name = 'simple_test' LIMIT 1;" || echo "MISSING") +MATCH2=$(pg "SELECT match FROM pgclone.verify('${SOURCE_CONNINFO}', 'public') WHERE table_name = 'simple_test' LIMIT 1;" || echo "MISSING") run_test "simple_test shows match" "echo '$MATCH2' | grep -qv 'missing'" # ---- GDPR masking report ---- echo "" echo "---- GDPR masking report ----" -RC=$(pg "SELECT count(*) FROM pgclone_masking_report('test_schema') WHERE table_name = 
'employees' AND column_name = 'email';" || echo "0") +RC=$(pg "SELECT count(*) FROM pgclone.masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'email';" || echo "0") run_test "report detects email column" "[ '$RC' = '1' ]" -SENS=$(pg "SELECT sensitivity FROM pgclone_masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'full_name' LIMIT 1;" || echo "") +SENS=$(pg "SELECT sensitivity FROM pgclone.masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'full_name' LIMIT 1;" || echo "") run_test "full_name detected as PII - Name" "[ '$SENS' = 'PII - Name' ]" -SENS2=$(pg "SELECT sensitivity FROM pgclone_masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'ssn' LIMIT 1;" || echo "") +SENS2=$(pg "SELECT sensitivity FROM pgclone.masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'ssn' LIMIT 1;" || echo "") run_test "ssn detected as National ID" "[ '$SENS2' = 'National ID' ]" -STATUS=$(pg "SELECT mask_status FROM pgclone_masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'email' LIMIT 1;" || echo "") +STATUS=$(pg "SELECT mask_status FROM pgclone.masking_report('test_schema') WHERE table_name = 'employees' AND column_name = 'email' LIMIT 1;" || echo "") run_test "email shows UNMASKED" "[ '$STATUS' = 'UNMASKED' ]" # ---- Dynamic data masking ---- echo "" echo "---- Dynamic data masking ----" pg "DROP TABLE IF EXISTS test_schema.employees_ddm CASCADE;" || true -RESULT=$(pg "SELECT pgclone_table('${SOURCE_CONNINFO}', 'test_schema', 'employees', true, 'employees_ddm');" || echo "ERROR") +RESULT=$(pg "SELECT pgclone.table('${SOURCE_CONNINFO}', 'test_schema', 'employees', true, 'employees_ddm');" || echo "ERROR") run_test "clone employees for DDM" "[ '$RESULT' != 'ERROR' ]" -RESULT=$(pg "SELECT pgclone_create_masking_policy('test_schema', 'employees_ddm', '{\"email\": \"email\", \"full_name\": \"name\", \"ssn\": \"null\"}', 
'postgres');" || echo "ERROR") +RESULT=$(pg "SELECT pgclone.create_masking_policy('test_schema', 'employees_ddm', '{\"email\": \"email\", \"full_name\": \"name\", \"ssn\": \"null\"}', 'postgres');" || echo "ERROR") run_test "create_masking_policy runs" "[ '$RESULT' != 'ERROR' ]" VIEW_EXISTS=$(pg "SELECT 1 FROM pg_views WHERE schemaname = 'test_schema' AND viewname = 'employees_ddm_masked';" || echo "0") @@ -106,7 +106,7 @@ run_test "masked view shows NULL for SSNs" "[ '$NULLS' = '5' ]" ROWS=$(pg "SELECT count(*) FROM test_schema.employees_ddm_masked;" || echo "0") run_test "masked view has 5 rows" "[ '$ROWS' = '5' ]" -RESULT=$(pg "SELECT pgclone_drop_masking_policy('test_schema', 'employees_ddm');" || echo "ERROR") +RESULT=$(pg "SELECT pgclone.drop_masking_policy('test_schema', 'employees_ddm');" || echo "ERROR") run_test "drop_masking_policy runs" "[ '$RESULT' != 'ERROR' ]" pg "DROP TABLE IF EXISTS test_schema.employees_ddm CASCADE;" || true