From 1008ad304aba3043c3c08d3c203dafbd158f0134 Mon Sep 17 00:00:00 2001 From: Rene Cannao Date: Fri, 3 Apr 2026 07:28:07 +0000 Subject: [PATCH] docs: write full content for group replication, fan-in, NDB, PXC, InnoDB Cluster, MySQL provider --- website/astro.config.mjs | 1 + .../docs/deploying/fan-in-all-masters.md | 98 ++++++++++++- .../docs/deploying/group-replication.md | 100 ++++++++++++- .../content/docs/deploying/innodb-cluster.md | 136 ++++++++++++++++++ .../src/content/docs/deploying/ndb-cluster.md | 82 ++++++++++- website/src/content/docs/providers/mysql.md | 131 ++++++++++++++--- website/src/content/docs/providers/pxc.md | 88 +++++++++++- 7 files changed, 612 insertions(+), 24 deletions(-) create mode 100644 website/src/content/docs/deploying/innodb-cluster.md diff --git a/website/astro.config.mjs b/website/astro.config.mjs index 2c1530d..a38813f 100644 --- a/website/astro.config.mjs +++ b/website/astro.config.mjs @@ -41,6 +41,7 @@ export default defineConfig({ { label: 'Group Replication', slug: 'deploying/group-replication' }, { label: 'Fan-In & All-Masters', slug: 'deploying/fan-in-all-masters' }, { label: 'NDB Cluster', slug: 'deploying/ndb-cluster' }, + { label: 'InnoDB Cluster', slug: 'deploying/innodb-cluster' }, ], }, { diff --git a/website/src/content/docs/deploying/fan-in-all-masters.md b/website/src/content/docs/deploying/fan-in-all-masters.md index 750860e..0b41e13 100644 --- a/website/src/content/docs/deploying/fan-in-all-masters.md +++ b/website/src/content/docs/deploying/fan-in-all-masters.md @@ -1,6 +1,100 @@ --- title: Fan-In & All-Masters -description: Fan-In & All-Masters documentation +description: Deploy multi-source replication topologies with dbdeployer — fan-in and all-masters. --- -Coming soon. +dbdeployer supports two multi-source replication topologies where nodes receive writes from more than one master. Both require MySQL 5.7.9 or later. 
+
+## Fan-In
+
+Fan-in is the inverse of master-slave: multiple masters feed into a single slave. This is useful for consolidating writes from many sources into one replica — for example, aggregating data from multiple application databases.
+
+```bash
+dbdeployer deploy replication 8.4.8 --topology=fan-in
+```
+
+Default layout: nodes 1 and 2 are masters, node 3 is the slave.
+
+```
+~/sandboxes/fan_in_msb_8_4_8/
+├── node1/ # master
+├── node2/ # master
+├── node3/ # slave (replicates from both masters)
+├── check_slaves
+├── test_replication
+└── use_all
+```
+
+### Custom Master and Slave Lists
+
+Use `--master-list` and `--slave-list` with `--nodes` to define any layout:
+
+```bash
+dbdeployer deploy replication 8.4.8 --topology=fan-in \
+  --nodes=5 \
+  --master-list="1,2,3" \
+  --slave-list="4,5" \
+  --concurrent
+```
+
+This creates 5 nodes where nodes 1–3 are masters and nodes 4–5 each replicate from all three masters.
+
+### Verifying Fan-In Replication
+
+```bash
+~/sandboxes/fan_in_msb_8_4_8/test_replication
+# master 1
+# master 2
+# slave 3
+# ok - '2' == '2' - Slaves received tables from all masters
+# pass: 1
+# fail: 0
+```
+
+## All-Masters
+
+In the all-masters topology, every node is simultaneously a master and a slave of every other node. This creates a fully connected replication mesh where a write on any node propagates to all others.
+
+```bash
+dbdeployer deploy replication 8.4.8 --topology=all-masters
+```
+
+Default: 3 nodes, each replicating from the other two.
+
+```
+~/sandboxes/all_masters_msb_8_4_8/
+├── node1/ # master + slave
+├── node2/ # master + slave
+├── node3/ # master + slave
+├── check_slaves
+├── test_replication
+└── use_all
+```
+
+### Use Cases
+
+**Fan-in** is suited for:
+- Data warehouses that consolidate writes from multiple OLTP sources
+- Centralized audit or logging replicas
+- Cross-shard aggregation in sharded setups
+
+**All-masters** is suited for:
+- Testing multi-source conflict scenarios
+- Active-active setups where all nodes need to accept writes and stay in sync
+- Exploring MySQL's multi-source replication capabilities
+
+## Running Queries on All Nodes
+
+```bash
+~/sandboxes/all_masters_msb_8_4_8/use_all -e "SHOW REPLICA STATUS\G" | grep -E "Source_Host|Running"
+```
+
+## Minimum Version
+
+Both topologies require MySQL 5.7.9 or later. Use `dbdeployer versions` to see what is available.
+
+## Related Pages
+
+- [Replication overview](/dbdeployer/deploying/replication)
+- [Group Replication](/dbdeployer/deploying/group-replication)
+- [Topology reference](/dbdeployer/reference/topology-reference)
diff --git a/website/src/content/docs/deploying/group-replication.md b/website/src/content/docs/deploying/group-replication.md
index 98c9468..09079a7 100644
--- a/website/src/content/docs/deploying/group-replication.md
+++ b/website/src/content/docs/deploying/group-replication.md
@@ -1,6 +1,102 @@
 ---
 title: Group Replication
-description: Group Replication documentation
+description: Deploy MySQL Group Replication clusters with dbdeployer — single-primary and multi-primary topologies.
 ---
 
-Coming soon.
+MySQL Group Replication (GR) is MySQL's built-in multi-master clustering technology. It provides automatic failover, conflict detection, and distributed recovery without external tools. dbdeployer makes it easy to spin up GR clusters for testing and development.
+ +**Minimum version:** MySQL 5.7.17+ + +## Single-Primary Mode + +In single-primary mode, one node is the primary (read/write) and the rest are secondaries (read-only). Failover is automatic — if the primary fails, the group elects a new one. + +```bash +dbdeployer deploy replication 8.4.8 --topology=group --single-primary +``` + +This creates three nodes by default: + +``` +~/sandboxes/group_sp_msb_8_4_8/ +├── node1/ # primary +├── node2/ # secondary +├── node3/ # secondary +├── check_nodes +├── start_all +├── stop_all +└── use_all +``` + +Connect to the primary: + +```bash +~/sandboxes/group_sp_msb_8_4_8/n1 -e "SELECT @@port, @@read_only" +``` + +## Multi-Primary Mode + +In multi-primary mode, all nodes accept writes simultaneously. Conflict detection handles concurrent updates to the same rows. + +```bash +dbdeployer deploy replication 8.4.8 --topology=group +``` + +All nodes are writable: + +```bash +~/sandboxes/group_msb_8_4_8/n1 -e "CREATE DATABASE test1" +~/sandboxes/group_msb_8_4_8/n2 -e "CREATE DATABASE test2" +~/sandboxes/group_msb_8_4_8/n3 -e "SELECT schema_name FROM information_schema.schemata" +``` + +## Monitoring: check_nodes + +The `check_nodes` script queries `performance_schema.replication_group_members` on each node and summarizes the group state: + +```bash +~/sandboxes/group_msb_8_4_8/check_nodes +# node 1 - ONLINE (PRIMARY) +# node 2 - ONLINE (SECONDARY) +# node 3 - ONLINE (SECONDARY) +``` + +## Available Scripts + +| Script | Purpose | +|--------|---------| +| `n1`, `n2`, `n3` | Connect to node 1, 2, 3 | +| `check_nodes` | Show group membership and role of each node | +| `start_all` | Start all nodes | +| `stop_all` | Stop all nodes | +| `use_all` | Run a query on all nodes | +| `test_replication` | Verify data propagates across all nodes | + +## Controlling the Number of Nodes + +Use `--nodes` to deploy more than three nodes: + +```bash +dbdeployer deploy replication 8.4.8 --topology=group --nodes=5 +``` + +## Concurrent Deployment + +Large 
clusters start faster with `--concurrent`: + +```bash +dbdeployer deploy replication 8.4.8 --topology=group --nodes=5 --concurrent +``` + +## InnoDB Cluster: the Managed Alternative + +MySQL InnoDB Cluster wraps Group Replication with MySQL Shell (for orchestration) and MySQL Router (for transparent failover routing). If you need the full managed stack, see [InnoDB Cluster](/dbdeployer/deploying/innodb-cluster). + +For plain Group Replication without the Shell/Router overhead, the `--topology=group` approach on this page is sufficient. + +## Related Pages + +- [Replication overview](/dbdeployer/deploying/replication) +- [InnoDB Cluster](/dbdeployer/deploying/innodb-cluster) +- [ProxySQL integration](/dbdeployer/providers/proxysql) +- [Topology reference](/dbdeployer/reference/topology-reference) diff --git a/website/src/content/docs/deploying/innodb-cluster.md b/website/src/content/docs/deploying/innodb-cluster.md new file mode 100644 index 0000000..696229d --- /dev/null +++ b/website/src/content/docs/deploying/innodb-cluster.md @@ -0,0 +1,136 @@ +--- +title: InnoDB Cluster +description: Deploy MySQL InnoDB Cluster with dbdeployer — Group Replication managed by MySQL Shell and routed by MySQL Router or ProxySQL. +--- + +MySQL InnoDB Cluster combines three components into a fully managed HA solution: + +- **Group Replication** — synchronous multi-master replication with automatic failover +- **MySQL Shell** (`mysqlsh`) — orchestrates cluster bootstrapping and management +- **MySQL Router** — transparent connection routing that directs reads/writes to the right node + +dbdeployer automates the entire setup. You get a working cluster with a router in one command. 
+ +**Minimum version:** MySQL 8.0+ + +## Requirements + +Before deploying, ensure the following are installed and in your `PATH`: + +- `mysqlsh` (MySQL Shell) — required for cluster bootstrapping +- `mysqlrouter` (MySQL Router) — required unless you use `--skip-router` + +```bash +which mysqlsh mysqlrouter +mysqlsh --version +mysqlrouter --version +``` + +## Deploy an InnoDB Cluster + +```bash +dbdeployer deploy replication 8.4.8 --topology=innodb-cluster +``` + +This bootstraps a 3-node Group Replication cluster via MySQL Shell, then starts MySQL Router pointed at it. + +``` +~/sandboxes/ic_msb_8_4_8/ +├── node1/ # GR node (primary) +├── node2/ # GR node (secondary) +├── node3/ # GR node (secondary) +├── router/ # MySQL Router instance +│ ├── router_start +│ ├── router_stop +│ └── router.conf +├── check_cluster +├── start_all +├── stop_all +└── use_all +``` + +## MySQL Router Ports + +| Port | Purpose | +|------|---------| +| 6446 | Read/Write — routes to the current primary | +| 6447 | Read-Only — routes to secondaries (round-robin) | + +Connect through the router: + +```bash +# Writes (goes to primary) +mysql -h 127.0.0.1 -P 6446 -u msandbox -pmsandbox + +# Reads (goes to a secondary) +mysql -h 127.0.0.1 -P 6447 -u msandbox -pmsandbox +``` + +## Deploy Without MySQL Router + +If you don't have MySQL Router installed, or want to manage routing yourself: + +```bash +dbdeployer deploy replication 8.4.8 --topology=innodb-cluster --skip-router +``` + +No `router/` directory is created. Nodes are still bootstrapped as a Group Replication cluster via MySQL Shell. + +## Deploy with ProxySQL Instead of MySQL Router + +ProxySQL can serve as the connection router for InnoDB Cluster: + +```bash +dbdeployer deploy replication 8.4.8 --topology=innodb-cluster \ + --skip-router \ + --with-proxysql +``` + +ProxySQL is deployed alongside the cluster and configured with the cluster nodes as backends. 
+ +For a comparison of MySQL Router vs ProxySQL for InnoDB Cluster routing, see [Topology reference](/dbdeployer/reference/topology-reference). + +## Checking Cluster Status + +```bash +~/sandboxes/ic_msb_8_4_8/check_cluster +# Cluster members: +# node1:3310 PRIMARY ONLINE +# node2:3320 SECONDARY ONLINE +# node3:3330 SECONDARY ONLINE +``` + +Or query the cluster via MySQL Shell: + +```bash +~/sandboxes/ic_msb_8_4_8/n1 -e \ + "SELECT member_host, member_port, member_role, member_state + FROM performance_schema.replication_group_members" +``` + +## Router Management + +```bash +# Start router +~/sandboxes/ic_msb_8_4_8/router/router_start + +# Stop router +~/sandboxes/ic_msb_8_4_8/router/router_stop +``` + +## Available Scripts + +| Script | Purpose | +|--------|---------| +| `n1`, `n2`, `n3` | Connect to cluster node 1, 2, 3 | +| `check_cluster` | Show cluster member status and roles | +| `start_all` / `stop_all` | Start or stop all cluster nodes | +| `use_all` | Run a query on every node | +| `router/router_start` | Start the MySQL Router | +| `router/router_stop` | Stop the MySQL Router | + +## Related Pages + +- [Group Replication](/dbdeployer/deploying/group-replication) +- [ProxySQL integration](/dbdeployer/providers/proxysql) +- [Topology reference](/dbdeployer/reference/topology-reference) diff --git a/website/src/content/docs/deploying/ndb-cluster.md b/website/src/content/docs/deploying/ndb-cluster.md index a6481f7..f27c2d5 100644 --- a/website/src/content/docs/deploying/ndb-cluster.md +++ b/website/src/content/docs/deploying/ndb-cluster.md @@ -1,6 +1,84 @@ --- title: NDB Cluster -description: NDB Cluster documentation +description: Deploy MySQL NDB Cluster sandboxes with dbdeployer using the ndb topology. --- -Coming soon. +MySQL NDB Cluster is a distributed, shared-nothing database that uses the NDB (Network DataBase) storage engine. It separates SQL processing (SQL nodes) from data storage (data nodes), allowing each layer to scale independently. 
dbdeployer can deploy NDB Cluster sandboxes for development and testing. + +## Requirements + +NDB Cluster requires a **MySQL Cluster tarball** — the standard MySQL Community Server will not work. Cluster-specific binaries include `ndb_mgmd` (management daemon) and `ndbd`/`ndbmtd` (data node daemons). + +Obtain a MySQL Cluster tarball from [MySQL Downloads](https://dev.mysql.com/downloads/cluster/) and unpack it: + +```bash +dbdeployer unpack mysql-cluster-8.4.8-linux-glibc2.17-x86_64.tar.gz +dbdeployer versions +# cluster_8.4.8 +``` + +dbdeployer detects that the tarball is an NDB Cluster build and marks the version accordingly. + +## Deploying an NDB Cluster + +```bash +dbdeployer deploy replication 8.4.8 --topology=ndb --ndb-nodes=3 +``` + +This deploys: +- 1 management node (`ndb_mgmd`) +- 3 data nodes (the `--ndb-nodes` count) +- 2 SQL nodes (MySQL servers that use the NDB storage engine) + +``` +~/sandboxes/ndb_msb_8_4_8/ +├── mgmd/ # NDB management node +├── ndb1/ # data node 1 +├── ndb2/ # data node 2 +├── ndb3/ # data node 3 +├── node1/ # SQL node 1 (mysqld + NDB engine) +├── node2/ # SQL node 2 (mysqld + NDB engine) +├── start_all +├── stop_all +└── check_nodes +``` + +## Data Nodes vs SQL Nodes + +**Data nodes** store and replicate the actual table data. NDB automatically partitions data across all data nodes and keeps two copies (configurable). The data nodes communicate directly with each other for synchronous replication. + +**SQL nodes** are standard `mysqld` processes with the NDB storage engine enabled. Applications connect to SQL nodes using standard MySQL clients and drivers. SQL nodes translate SQL into NDB API calls and route them to the appropriate data nodes. + +The **management node** holds the cluster configuration and monitors the health of all other nodes. It does not handle data or queries. 
+ +## Connecting to SQL Nodes + +```bash +# Connect to SQL node 1 +~/sandboxes/ndb_msb_8_4_8/n1 -e "SHOW ENGINE NDB STATUS\G" + +# Run a query on all SQL nodes +~/sandboxes/ndb_msb_8_4_8/use_all -e "SELECT @@port, @@ndbcluster" +``` + +## Checking Cluster Status + +```bash +~/sandboxes/ndb_msb_8_4_8/check_nodes +# or connect to the management node: +~/sandboxes/ndb_msb_8_4_8/mgmd/ndb_mgm -e "ALL STATUS" +``` + +## The --ndb-nodes Flag + +| Flag | Default | Description | +|------|---------|-------------| +| `--ndb-nodes` | 3 | Number of NDB data nodes to deploy | + +Increasing `--ndb-nodes` adds data nodes, which increases both storage capacity and redundancy. + +## Related Pages + +- [Replication overview](/dbdeployer/deploying/replication) +- [Versions & Flavors](/dbdeployer/concepts/flavors) +- [Topology reference](/dbdeployer/reference/topology-reference) diff --git a/website/src/content/docs/providers/mysql.md b/website/src/content/docs/providers/mysql.md index db4eca7..36d1fe7 100644 --- a/website/src/content/docs/providers/mysql.md +++ b/website/src/content/docs/providers/mysql.md @@ -1,29 +1,128 @@ --- title: "MySQL" +description: Deploy MySQL, Percona Server, and MariaDB sandboxes with dbdeployer across all supported topologies. --- -# Standard and non-standard basedir names -[[HOME](https://github.com/ProxySQL/dbdeployer/wiki)] +The MySQL provider is the core of dbdeployer. It supports MySQL Community Server, Percona Server, and MariaDB — collectively referred to as "flavors". All replication topologies are available through this provider. -dbdeployer expects to get the binaries from ``$HOME/opt/mysql/x.x.xx``. For example, when you run the command ``dbdeployer deploy single 8.0.11``, you must have the binaries for MySQL 8.0.11 expanded into a directory named ``$HOME/opt/mysql/8.0.11``. 
+## Supported Flavors -If you want to keep several directories with the same version, you can differentiate them using a **prefix**: +| Flavor | Tarball prefix | Notes | +|--------|---------------|-------| +| MySQL Community Server | `mysql-` | Default; versions 5.7, 8.0, 8.4, 9.x | +| Percona Server | `Percona-Server-` | Drop-in MySQL replacement with extra features | +| MariaDB | `mariadb-` | Compatible with MySQL 5.7 API; some features differ | - $HOME/opt/mysql/ - 8.0.11 - lab_8.0.11 - ps_8.0.11 - myown_8.0.11 +dbdeployer detects the flavor from the tarball name and directory structure. You can override detection with `--flavor`: -In the above cases, running ``dbdeployer deploy single lab_8.0.11`` will do what you expect, i.e. dbdeployer will use the binaries in ``lab_8.0.11`` and recognize ``8.0.11`` as the version for the database. +```bash +dbdeployer deploy single my_custom_dir --flavor=percona +``` -When the extracted tarball directory name that you want to use doesn't contain the full version number (such as ``/home/dbuser/build/path/5.7-extra``) you need to provide the version using the option ``--binary-version``. For example: +## Binary Management - dbdeployer deploy single 5.7-extra \ - --sandbox-binary=/home/dbuser/build/path \ - --binary-version=5.7.22 +### Download a Tarball -In the above command, ``--sandbox-binary`` indicates where to search for the binaries, ``5.7-extra`` is where the binaries are, and ``--binary-version`` indicates which version should be used. +Use the built-in download registry to fetch a tarball by version: -Just to be clear, dbdeployer will recognize the directory as containing a version if it is only "x.x.x" or if it **ends** with "x.x.x" (as in ``lab_8.0.11``.) 
+```bash +# List available versions +dbdeployer downloads list +# Download a specific version +dbdeployer downloads get-by-version 8.4.8 + +# Download Percona Server +dbdeployer downloads get-by-version 8.0.35 --flavor=percona +``` + +### Unpack a Tarball + +Expand the tarball into the sandbox binary directory (default `~/opt/mysql/`): + +```bash +dbdeployer unpack mysql-8.4.8-linux-glibc2.17-x86_64.tar.xz + +# Verify it is available +dbdeployer versions +``` + +### Custom Binary Paths + +If your binaries are not in `~/opt/mysql/`, point dbdeployer at them: + +```bash +dbdeployer deploy single 8.4.8 --sandbox-binary=/custom/path/to/binaries +``` + +If the directory name does not follow the `x.x.xx` version format, supply the version explicitly: + +```bash +dbdeployer deploy single 5.7-extra \ + --sandbox-binary=/home/user/build \ + --binary-version=5.7.22 +``` + +### Naming Prefix + +You can keep multiple builds of the same version side-by-side using directory prefixes: + +``` +~/opt/mysql/ +├── 8.0.35 # plain +├── ps_8.0.35 # Percona Server +├── lab_8.0.35 # custom build +``` + +Deploy a prefixed version by name: + +```bash +dbdeployer deploy single ps_8.0.35 +dbdeployer deploy single lab_8.0.35 +``` + +## Supported MySQL Versions + +| Series | Status | Topologies | +|--------|--------|-----------| +| 5.7.x | Legacy | single, replication, group (5.7.17+) | +| 8.0.x | Stable | all topologies | +| 8.4.x | LTS (recommended) | all topologies | +| 9.x | Innovation | all topologies | + +## Supported Topologies + +All topologies are deployed with `dbdeployer deploy replication --topology=`: + +| Topology | Flag | Min Version | +|----------|------|-------------| +| Single | `deploy single` | any | +| Master-slave | `--topology=master-slave` (default) | any | +| Group Replication | `--topology=group` | 5.7.17 | +| Single-primary GR | `--topology=group --single-primary` | 5.7.17 | +| InnoDB Cluster | `--topology=innodb-cluster` | 8.0 | +| Fan-in | `--topology=fan-in` | 
5.7.9 | +| All-masters | `--topology=all-masters` | 5.7.9 | +| NDB Cluster | `--topology=ndb` | requires NDB tarball | +| PXC | `--topology=pxc` | requires PXC tarball | + +## Flavor Detection and the --flavor Flag + +dbdeployer reads the `mysqld` binary and configuration to determine the flavor automatically. When auto-detection is insufficient (e.g., custom builds), specify it manually: + +```bash +dbdeployer deploy single my_build --flavor=mysql +dbdeployer deploy single my_ps_build --flavor=percona +dbdeployer deploy single my_maria --flavor=mariadb +``` + +Flavor affects which features are enabled, default configuration, and which topology options are available. + +## Related Pages + +- [Versions & Flavors](/dbdeployer/concepts/flavors) +- [Replication overview](/dbdeployer/deploying/replication) +- [Group Replication](/dbdeployer/deploying/group-replication) +- [InnoDB Cluster](/dbdeployer/deploying/innodb-cluster) +- [NDB Cluster](/dbdeployer/deploying/ndb-cluster) +- [Percona XtraDB Cluster](/dbdeployer/providers/pxc) diff --git a/website/src/content/docs/providers/pxc.md b/website/src/content/docs/providers/pxc.md index 9b50158..3599651 100644 --- a/website/src/content/docs/providers/pxc.md +++ b/website/src/content/docs/providers/pxc.md @@ -1,6 +1,90 @@ --- title: Percona XtraDB Cluster -description: Percona XtraDB Cluster documentation +description: Deploy Percona XtraDB Cluster (PXC) sandboxes with dbdeployer using Galera-based synchronous replication. --- -Coming soon. +Percona XtraDB Cluster (PXC) is a high-availability MySQL solution built on Galera replication. Unlike asynchronous MySQL replication, Galera replicates synchronously — every node has the same data at all times, and all nodes are writable. dbdeployer can deploy PXC clusters for development and testing. + +## Requirements + +PXC requires a **PXC-specific tarball** — the standard MySQL or Percona Server tarballs will not work. 
PXC binaries include the Galera library and the `wsrep` plugin. + +Download a PXC tarball from [Percona Downloads](https://www.percona.com/downloads/Percona-XtraDB-Cluster-LATEST/) and unpack it: + +```bash +dbdeployer unpack Percona-XtraDB-Cluster-8.0.35-27.1-Linux.x86_64.glibc2.17.tar.gz +dbdeployer versions +# pxc8.0.35 +``` + +dbdeployer detects that the tarball is a PXC build and prefixes the version with `pxc`. + +## Deploying a PXC Cluster + +```bash +dbdeployer deploy replication pxc8.0.35 --topology=pxc +``` + +Default: 3 nodes, all writable. + +``` +~/sandboxes/pxc_msb_8_0_35/ +├── node1/ # writable Galera node +├── node2/ # writable Galera node +├── node3/ # writable Galera node +├── check_nodes +├── start_all +├── stop_all +└── use_all +``` + +## How Galera Replication Works + +PXC uses the Galera wsrep (write-set replication) protocol: + +1. A transaction is executed on any node. +2. Before commit, the write set is broadcast to all other nodes. +3. All nodes certify the write set for conflicts. +4. If no conflicts, the transaction commits on all nodes simultaneously. +5. If a conflict is detected, the transaction is rolled back on the originating node. + +This means writes are slower than asynchronous replication (due to network round-trips), but every node is always consistent. 
+ +## All Nodes Are Writable + +Unlike standard replication where only the master accepts writes, every PXC node can handle writes: + +```bash +~/sandboxes/pxc_msb_8_0_35/n1 -e "CREATE TABLE test.t1 (id INT PRIMARY KEY)" +~/sandboxes/pxc_msb_8_0_35/n2 -e "INSERT INTO test.t1 VALUES (1)" +~/sandboxes/pxc_msb_8_0_35/n3 -e "SELECT * FROM test.t1" +# Returns: 1 +``` + +## Checking Cluster Status + +```bash +~/sandboxes/pxc_msb_8_0_35/check_nodes +# node 1 - wsrep_cluster_size=3 wsrep_local_state_comment=Synced +# node 2 - wsrep_cluster_size=3 wsrep_local_state_comment=Synced +# node 3 - wsrep_cluster_size=3 wsrep_local_state_comment=Synced +``` + +Or query wsrep status directly: + +```bash +~/sandboxes/pxc_msb_8_0_35/n1 -e "SHOW STATUS LIKE 'wsrep_%'" +``` + +## Running Queries on All Nodes + +```bash +~/sandboxes/pxc_msb_8_0_35/use_all -e "SELECT @@port, @@wsrep_on" +``` + +## Related Pages + +- [Replication overview](/dbdeployer/deploying/replication) +- [Versions & Flavors](/dbdeployer/concepts/flavors) +- [Topology reference](/dbdeployer/reference/topology-reference) +- [MySQL provider](/dbdeployer/providers/mysql)