From f0ad03804c635a6ccea57891067ed928b9b2eac6 Mon Sep 17 00:00:00 2001 From: Rene Cannao Date: Thu, 2 Apr 2026 07:48:17 +0000 Subject: [PATCH 1/3] feat: add InnoDB Cluster topology (--topology=innodb-cluster) Deploy Group Replication + MySQL Shell cluster management + MySQL Router: dbdeployer deploy replication 8.4.4 --topology=innodb-cluster - MySQL Shell bootstraps the cluster via AdminAPI (dba.createCluster) - MySQL Router provides transparent connection routing - --skip-router flag to deploy without Router (useful for ProxySQL) - Requires mysqlsh binary; mysqlrouter optional - Minimum MySQL 8.0.11 required (MySQLX default capability) - Reuses existing GR node creation and template infrastructure New files: sandbox/innodb_cluster.go - CreateInnoDBCluster() implementation sandbox/cluster_templates.go - template embeds for cluster scripts sandbox/templates/cluster/ - init_cluster, check_cluster, router scripts Modified: globals/globals.go - InnoDBClusterLabel, AllowedTopologies, script names globals/template_names.go - template name constants defaults/defaults.go - InnoDBClusterPrefix, InnoDBClusterBasePort sandbox/replication.go - dispatch for innodb-cluster topology sandbox/templates.go - register InnoDBClusterTemplates in AllTemplates cmd/replication.go - --skip-router flag, help text, examples --- cmd/replication.go | 12 +- defaults/defaults.go | 15 + globals/globals.go | 9 + globals/template_names.go | 7 + sandbox/cluster_templates.go | 72 ++ sandbox/innodb_cluster.go | 622 ++++++++++++++++++ sandbox/replication.go | 13 + sandbox/templates.go | 1 + sandbox/templates/cluster/check_cluster.gotxt | 8 + sandbox/templates/cluster/init_cluster.gotxt | 38 ++ .../cluster/innodb_cluster_options.gotxt | 10 + sandbox/templates/cluster/router_start.gotxt | 11 + sandbox/templates/cluster/router_stop.gotxt | 13 + 13 files changed, 829 insertions(+), 2 deletions(-) create mode 100644 sandbox/cluster_templates.go create mode 100644 sandbox/innodb_cluster.go create mode 
100644 sandbox/templates/cluster/check_cluster.gotxt create mode 100644 sandbox/templates/cluster/init_cluster.gotxt create mode 100644 sandbox/templates/cluster/innodb_cluster_options.gotxt create mode 100644 sandbox/templates/cluster/router_start.gotxt create mode 100644 sandbox/templates/cluster/router_stop.gotxt diff --git a/cmd/replication.go b/cmd/replication.go index 136e637..6819c6d 100644 --- a/cmd/replication.go +++ b/cmd/replication.go @@ -241,6 +241,8 @@ func replicationSandbox(cmd *cobra.Command, args []string) { globals.NdbLabel) } + skipRouter, _ := flags.GetBool(globals.SkipRouterLabel) + origin := args[0] if args[0] != sd.BasedirName { origin = sd.BasedirName @@ -252,7 +254,8 @@ func replicationSandbox(cmd *cobra.Command, args []string) { NdbNodes: ndbNodes, MasterIp: masterIp, MasterList: masterList, - SlaveList: slaveList}) + SlaveList: slaveList, + SkipRouter: skipRouter}) if err != nil { common.Exitf(1, globals.ErrCreatingSandbox, err) } @@ -296,7 +299,9 @@ var replicationCmd = &cobra.Command{ Long: `The replication command allows you to deploy several nodes in replication. Allowed topologies are "master-slave" for all versions, and "group", "all-masters", "fan-in" for 5.7.17+. -Topologies "pcx" and "ndb" are available for binaries of type Percona Xtradb Cluster and MySQL Cluster. +Topologies "pxc" and "ndb" are available for binaries of type Percona Xtradb Cluster and MySQL Cluster. +Topology "innodb-cluster" deploys Group Replication managed by MySQL Shell AdminAPI with optional +MySQL Router for connection routing (requires MySQL 8.0.11+ and mysqlsh). For this command to work, there must be a directory $HOME/opt/mysql/5.7.21, containing the binary files from mysql-5.7.21-$YOUR_OS-x86_64.tar.gz Use the "unpack" command to get the tarball into the right directory. @@ -321,6 +326,8 @@ Use the "unpack" command to get the tarball into the right directory. 
$ dbdeployer deploy --topology=fan-in replication 5.7 $ dbdeployer deploy --topology=pxc replication pxc5.7.25 $ dbdeployer deploy --topology=ndb replication ndb8.0.14 + $ dbdeployer deploy --topology=innodb-cluster replication 8.4.4 + $ dbdeployer deploy --topology=innodb-cluster replication 8.4.4 --skip-router `, Annotations: map[string]string{"export": ExportAnnotationToJson(ReplicationExport)}, } @@ -339,6 +346,7 @@ func init() { replicationCmd.PersistentFlags().BoolP(globals.SuperReadOnlyLabel, "", false, "Set super-read-only for slaves") replicationCmd.PersistentFlags().Bool(globals.ReplHistoryDirLabel, false, "uses the replication directory to store mysql client history") setPflag(replicationCmd, globals.ChangeMasterOptions, "", "CHANGE_MASTER_OPTIONS", "", "options to add to CHANGE MASTER TO", true) + replicationCmd.PersistentFlags().Bool(globals.SkipRouterLabel, false, "Skip MySQL Router deployment for InnoDB Cluster topology") replicationCmd.PersistentFlags().Bool("with-proxysql", false, "Deploy ProxySQL alongside the replication sandbox") replicationCmd.PersistentFlags().String(globals.ProviderLabel, globals.ProviderValue, "Database provider (mysql, postgresql)") } diff --git a/defaults/defaults.go b/defaults/defaults.go index 95eea80..200d076 100644 --- a/defaults/defaults.go +++ b/defaults/defaults.go @@ -66,6 +66,8 @@ type DbdeployerDefaults struct { RemoteTarballUrl string `json:"remote-tarball-url"` PxcPrefix string `json:"pxc-prefix"` NdbPrefix string `json:"ndb-prefix"` + InnoDBClusterPrefix string `json:"innodb-cluster-prefix"` + InnoDBClusterBasePort int `json:"innodb-cluster-base-port"` DefaultSandboxExecutable string `json:"default-sandbox-executable"` DownloadNameLinux string `json:"download-name-linux"` DownloadNameMacOs string `json:"download-name-macos"` @@ -134,6 +136,8 @@ var ( RemoteTarballUrl: "https://raw.githubusercontent.com/datacharmer/dbdeployer/master/downloads/tarball_list.json", NdbPrefix: "ndb_msb_", PxcPrefix: "pxc_msb_", + 
InnoDBClusterPrefix: "ic_msb_", + InnoDBClusterBasePort: 21000, DefaultSandboxExecutable: "default", DownloadNameLinux: "mysql-{{.Version}}-linux-glibc2.17-x86_64{{.Minimal}}.{{.Ext}}", DownloadNameMacOs: "mysql-{{.Version}}-macos11-x86_64.{{.Ext}}", @@ -226,6 +230,7 @@ func ValidateDefaults(nd DbdeployerDefaults) bool { checkInt("pxc-base-port", nd.PxcBasePort, minPortValue, maxPortValue) && checkInt("ndb-base-port", nd.NdbBasePort, minPortValue, maxPortValue) && checkInt("ndb-cluster-port", nd.NdbClusterPort, minPortValue, maxPortValue) && + checkInt("innodb-cluster-base-port", nd.InnoDBClusterBasePort, minPortValue, maxPortValue) && checkInt("group-port-delta", nd.GroupPortDelta, 101, 299) && checkInt("mysqlx-port-delta", nd.MysqlXPortDelta, 2000, 15000) && checkInt("admin-port-delta", nd.AdminPortDelta, 2000, 15000) @@ -250,6 +255,7 @@ func ValidateDefaults(nd DbdeployerDefaults) bool { nd.MasterAbbr != nd.SlaveAbbr && nd.MultiplePrefix != nd.NdbPrefix && nd.MultiplePrefix != nd.PxcPrefix && + nd.MultiplePrefix != nd.InnoDBClusterPrefix && nd.SandboxHome != nd.SandboxBinary if !noConflicts { common.CondPrintf("Conflicts found in defaults values:\n") @@ -270,6 +276,7 @@ func ValidateDefaults(nd DbdeployerDefaults) bool { nd.MultiplePrefix != "" && nd.PxcPrefix != "" && nd.NdbPrefix != "" && + nd.InnoDBClusterPrefix != "" && nd.DefaultSandboxExecutable != "" && nd.DownloadUrl != "" && nd.DownloadNameLinux != "" && @@ -403,6 +410,10 @@ func UpdateDefaults(label, value string, storeDefaults bool) { newDefaults.PxcPrefix = value case "ndb-prefix": newDefaults.NdbPrefix = value + case "innodb-cluster-prefix": + newDefaults.InnoDBClusterPrefix = value + case "innodb-cluster-base-port": + newDefaults.InnoDBClusterBasePort = common.Atoi(value) case "default-sandbox-executable": newDefaults.DefaultSandboxExecutable = value case "download-url": @@ -538,6 +549,10 @@ func DefaultsToMap() common.StringMap { "pxc-prefix": currentDefaults.PxcPrefix, "NdbPrefix": 
currentDefaults.NdbPrefix, "ndb-prefix": currentDefaults.NdbPrefix, + "InnoDBClusterPrefix": currentDefaults.InnoDBClusterPrefix, + "innodb-cluster-prefix": currentDefaults.InnoDBClusterPrefix, + "InnoDBClusterBasePort": currentDefaults.InnoDBClusterBasePort, + "innodb-cluster-base-port": currentDefaults.InnoDBClusterBasePort, "DefaultSandboxExecutable": currentDefaults.DefaultSandboxExecutable, "default-sandbox-executable": currentDefaults.DefaultSandboxExecutable, "download-url": currentDefaults.DownloadUrl, diff --git a/globals/globals.go b/globals/globals.go index 790306e..86062b2 100644 --- a/globals/globals.go +++ b/globals/globals.go @@ -179,7 +179,9 @@ const ( TopologyValue = "master-slave" PxcLabel = "pxc" NdbLabel = "ndb" + InnoDBClusterLabel = "innodb-cluster" ChangeMasterOptions = "change-master-options" + SkipRouterLabel = "skip-router" // Instantiated in cmd/unpack.go and unpack/unpack.go GzExt = ".gz" @@ -320,6 +322,12 @@ const ( ScriptCheckSlaves = "check_slaves" ScriptUseAllMasters = "use_all_masters" ScriptUseAllSlaves = "use_all_slaves" + + // InnoDB Cluster scripts + ScriptInitCluster = "init_cluster" + ScriptCheckCluster = "check_cluster" + ScriptRouterStart = "router_start" + ScriptRouterStop = "router_stop" ) // Common error messages @@ -475,6 +483,7 @@ var AllowedTopologies = []string{ FanInLabel, AllMastersLabel, NdbLabel, + InnoDBClusterLabel, } // This structure is not used directly by dbdeployer. 
diff --git a/globals/template_names.go b/globals/template_names.go index d54d1a4..2c1034b 100644 --- a/globals/template_names.go +++ b/globals/template_names.go @@ -171,6 +171,13 @@ const ( TmplInitNodes84 = "init_nodes84" TmplGroupReplOptions84 = "group_repl_options84" + // innodb_cluster + TmplInnoDBClusterOptions = "innodb_cluster_options" + TmplInitCluster = "init_cluster" + TmplCheckCluster = "check_cluster" + TmplRouterStart = "router_start" + TmplRouterStop = "router_stop" + // MySQL 8.4+ specific templates TmplInitSlaves84 = "init_slaves_84" TmplReplCrashSafeOptions84 = "repl_crash_safe_options84" diff --git a/sandbox/cluster_templates.go b/sandbox/cluster_templates.go new file mode 100644 index 0000000..93b6aac --- /dev/null +++ b/sandbox/cluster_templates.go @@ -0,0 +1,72 @@ +// DBDeployer - The MySQL Sandbox +// Copyright © 2006-2026 Giuseppe Maxia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.16 +// +build go1.16 + +package sandbox + +import ( + _ "embed" + + "github.com/ProxySQL/dbdeployer/globals" +) + +// Templates for InnoDB Cluster + +var ( + //go:embed templates/cluster/innodb_cluster_options.gotxt + innodbClusterOptionsTemplate string + + //go:embed templates/cluster/init_cluster.gotxt + initClusterTemplate string + + //go:embed templates/cluster/check_cluster.gotxt + checkClusterTemplate string + + //go:embed templates/cluster/router_start.gotxt + routerStartTemplate string + + //go:embed templates/cluster/router_stop.gotxt + routerStopTemplate string + + InnoDBClusterTemplates = TemplateCollection{ + globals.TmplInnoDBClusterOptions: TemplateDesc{ + Description: "MySQL server options for InnoDB Cluster nodes", + Notes: "Enables GTID, report_host, and disables non-InnoDB storage engines", + Contents: innodbClusterOptionsTemplate, + }, + globals.TmplInitCluster: TemplateDesc{ + Description: "Initialize InnoDB Cluster via MySQL Shell", + Notes: "Uses dba.createCluster() and cluster.addInstance()", + Contents: initClusterTemplate, + }, + globals.TmplCheckCluster: TemplateDesc{ + Description: "Check InnoDB Cluster status via MySQL Shell", + Notes: "", + Contents: checkClusterTemplate, + }, + globals.TmplRouterStart: TemplateDesc{ + Description: "Start MySQL Router for InnoDB Cluster", + Notes: "", + Contents: routerStartTemplate, + }, + globals.TmplRouterStop: TemplateDesc{ + Description: "Stop MySQL Router for InnoDB Cluster", + Notes: "", + Contents: routerStopTemplate, + }, + } +) diff --git a/sandbox/innodb_cluster.go b/sandbox/innodb_cluster.go new file mode 100644 index 0000000..f94cfe7 --- /dev/null +++ b/sandbox/innodb_cluster.go @@ -0,0 +1,622 @@ +// DBDeployer - The MySQL Sandbox +// Copyright © 2006-2026 Giuseppe Maxia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sandbox + +import ( + "fmt" + "os" + "path" + "regexp" + "time" + + "github.com/ProxySQL/dbdeployer/common" + "github.com/ProxySQL/dbdeployer/concurrent" + "github.com/ProxySQL/dbdeployer/defaults" + "github.com/ProxySQL/dbdeployer/globals" + "github.com/dustin/go-humanize/english" + "github.com/pkg/errors" +) + +// findMysqlShell locates the mysqlsh binary. It first checks the basedir/bin +// directory, then falls back to the system PATH. +func findMysqlShell(basedir string) (string, error) { + mysqlshPath := path.Join(basedir, "bin", "mysqlsh") + if common.ExecExists(mysqlshPath) { + return mysqlshPath, nil + } + // Check if mysqlsh is available on PATH + mysqlshPath = "mysqlsh" + if common.ExecExists(mysqlshPath) { + return mysqlshPath, nil + } + fullPath := common.FindInPath("mysqlsh") + if fullPath != "" { + return fullPath, nil + } + return "", fmt.Errorf("mysqlsh not found in %s/bin or in PATH. "+ + "MySQL Shell is required for InnoDB Cluster deployment. "+ + "Install it from https://dev.mysql.com/downloads/shell/", basedir) +} + +// findMysqlRouter locates the mysqlrouter binary. It first checks the basedir/bin +// directory, then falls back to the system PATH. 
+func findMysqlRouter(basedir string) (string, error) { + routerPath := path.Join(basedir, "bin", "mysqlrouter") + if common.ExecExists(routerPath) { + return routerPath, nil + } + routerPath = "mysqlrouter" + if common.ExecExists(routerPath) { + return routerPath, nil + } + fullPath := common.FindInPath("mysqlrouter") + if fullPath != "" { + return fullPath, nil + } + return "", fmt.Errorf("mysqlrouter not found in %s/bin or in PATH. "+ + "Use --skip-router to deploy without MySQL Router", basedir) +} + +// CreateInnoDBCluster creates an InnoDB Cluster sandbox with the given number of nodes. +// It creates nodes using the same approach as Group Replication, then uses MySQL Shell +// to bootstrap the cluster via the AdminAPI (dba.createCluster / cluster.addInstance). +// Optionally, MySQL Router is bootstrapped for transparent connection routing. +func CreateInnoDBCluster(sandboxDef SandboxDef, origin string, nodes int, masterIp string, skipRouter bool) error { + var execLists []concurrent.ExecutionList + + var logger *defaults.Logger + if sandboxDef.Logger != nil { + logger = sandboxDef.Logger + } else { + var fileName string + var err error + logger, fileName, err = defaults.NewLogger(common.LogDirName(), "innodb-cluster") + if err != nil { + return err + } + sandboxDef.LogFileName = common.ReplaceLiteralHome(fileName) + } + + readOnlyOptions, err := checkReadOnlyFlags(sandboxDef) + if err != nil { + return err + } + if readOnlyOptions != "" { + return fmt.Errorf("options --read-only and --super-read-only can't be used for InnoDB Cluster topology\n" + + "as the cluster software sets it when needed") + } + + // InnoDB Cluster requires MySQL 8.0+ + isMinimumVersion, err := common.HasCapability(sandboxDef.Flavor, common.MySQLXDefault, sandboxDef.Version) + if err != nil { + return err + } + if !isMinimumVersion { + return fmt.Errorf("InnoDB Cluster requires MySQL 8.0.11 or later (current: %s)", sandboxDef.Version) + } + + // Find mysqlsh - it is required + 
mysqlshPath, err := findMysqlShell(sandboxDef.Basedir) + if err != nil { + return err + } + logger.Printf("Using MySQL Shell: %s\n", mysqlshPath) + + // Find mysqlrouter - optional if --skip-router is set + var mysqlrouterPath string + if !skipRouter { + mysqlrouterPath, err = findMysqlRouter(sandboxDef.Basedir) + if err != nil { + return err + } + logger.Printf("Using MySQL Router: %s\n", mysqlrouterPath) + } + + vList, err := common.VersionToList(sandboxDef.Version) + if err != nil { + return err + } + rev := vList[2] + basePort := computeBaseport(sandboxDef.Port + defaults.Defaults().InnoDBClusterBasePort + (rev * 100)) + if sandboxDef.BasePort > 0 { + basePort = sandboxDef.BasePort + } + + if nodes < 3 { + return fmt.Errorf("can't run InnoDB Cluster with less than 3 nodes") + } + if common.DirExists(sandboxDef.SandboxDir) { + sandboxDef, err = checkDirectory(sandboxDef) + if err != nil { + return err + } + } + + // Allocate MySQL ports + firstPort, err := common.FindFreePort(basePort+1, sandboxDef.InstalledPorts, nodes) + if err != nil { + return errors.Wrapf(err, "error retrieving free port for InnoDB Cluster") + } + basePort = firstPort - 1 + + // Allocate GR communication ports + baseGroupPort := basePort + defaults.Defaults().GroupPortDelta + firstGroupPort, err := common.FindFreePort(baseGroupPort+1, sandboxDef.InstalledPorts, nodes) + if err != nil { + return errors.Wrapf(err, "error retrieving group replication free port") + } + baseGroupPort = firstGroupPort - 1 + + for checkPort := basePort + 1; checkPort < basePort+nodes+1; checkPort++ { + err = checkPortAvailability("CreateInnoDBCluster", sandboxDef.SandboxDir, sandboxDef.InstalledPorts, checkPort) + if err != nil { + return err + } + } + for checkPort := baseGroupPort + 1; checkPort < baseGroupPort+nodes+1; checkPort++ { + err = checkPortAvailability("CreateInnoDBCluster-group", sandboxDef.SandboxDir, sandboxDef.InstalledPorts, checkPort) + if err != nil { + return err + } + } + + baseMysqlxPort, 
err := getBaseMysqlxPort(basePort, sandboxDef, nodes) + if err != nil { + return err + } + baseAdminPort, err := getBaseAdminPort(basePort, sandboxDef, nodes) + if err != nil { + return err + } + + err = os.Mkdir(sandboxDef.SandboxDir, globals.PublicDirectoryAttr) + if err != nil { + return err + } + common.AddToCleanupStack(common.RmdirAll, "RmdirAll", sandboxDef.SandboxDir) + logger.Printf("Creating directory %s\n", sandboxDef.SandboxDir) + + timestamp := time.Now() + slaveLabel := defaults.Defaults().SlavePrefix + slaveAbbr := defaults.Defaults().SlaveAbbr + masterAbbr := defaults.Defaults().MasterAbbr + masterLabel := defaults.Defaults().MasterName + masterList := makeNodesList(nodes) + slaveList := masterList + + // InnoDB Cluster always uses single-primary mode by default + // The primary is node 1, the rest are secondaries + masterList = "1" + slaveList = "" + for N := 2; N <= nodes; N++ { + if slaveList != "" { + slaveList += " " + } + slaveList += fmt.Sprintf("%d", N) + } + + changeMasterExtra := setChangeMasterProperties("", sandboxDef.ChangeMasterOptions, logger) + nodeLabel := defaults.Defaults().NodePrefix + stopNodeList := "" + for i := nodes; i > 0; i-- { + stopNodeList += fmt.Sprintf(" %d", i) + } + + replCmds := replicationCommands(sandboxDef.Version) + + // Build the connection string for GR seeds + connectionString := "" + for i := 0; i < nodes; i++ { + groupPort := baseGroupPort + i + 1 + if connectionString != "" { + connectionString += "," + } + connectionString += fmt.Sprintf("127.0.0.1:%d", groupPort) + } + logger.Printf("Creating connection string %s\n", connectionString) + + routerDir := path.Join(sandboxDef.SandboxDir, "router") + + var data = common.StringMap{ + "ShellPath": sandboxDef.ShellPath, + "Copyright": globals.ShellScriptCopyright, + "AppVersion": common.VersionDef, + "DateTime": timestamp.Format(time.UnixDate), + "SandboxDir": sandboxDef.SandboxDir, + "MasterIp": masterIp, + "MasterList": masterList, + "NodeLabel": nodeLabel, + 
"SlaveList": slaveList, + "RplUser": sandboxDef.RplUser, + "RplPassword": sandboxDef.RplPassword, + "SlaveLabel": slaveLabel, + "SlaveAbbr": slaveAbbr, + "ChangeMasterExtra": changeMasterExtra, + "MasterLabel": masterLabel, + "MasterAbbr": masterAbbr, + "StopNodeList": stopNodeList, + "Nodes": []common.StringMap{}, + // InnoDB Cluster specific + "MysqlShell": mysqlshPath, + "PrimaryPort": basePort + 1, + "ClusterName": "mySandboxCluster", + "DbPassword": sandboxDef.DbPassword, + "RouterDir": routerDir, + "Replicas": []common.StringMap{}, + } + data["ChangeMasterTo"] = replCmds["ChangeMasterTo"] + data["MasterUserParam"] = replCmds["MasterUserParam"] + data["MasterPasswordParam"] = replCmds["MasterPasswordParam"] + data["StartReplica"] = replCmds["StartReplica"] + data["StopReplica"] = replCmds["StopReplica"] + data["ResetMasterCmd"] = replCmds["ResetMasterCmd"] + + sbType := "innodb-cluster" + logger.Printf("Defining cluster type %s\n", sbType) + + sbDesc := common.SandboxDescription{ + Basedir: sandboxDef.Basedir, + SBType: sbType, + Version: sandboxDef.Version, + Flavor: sandboxDef.Flavor, + Port: []int{}, + Nodes: nodes, + NodeNum: 0, + LogFile: sandboxDef.LogFileName, + } + + sbItem := defaults.SandboxItem{ + Origin: sbDesc.Basedir, + SBType: sbDesc.SBType, + Version: sandboxDef.Version, + Flavor: sandboxDef.Flavor, + Port: []int{}, + Nodes: []string{}, + Destination: sandboxDef.SandboxDir, + } + + if sandboxDef.LogFileName != "" { + sbItem.LogDirectory = common.DirName(sandboxDef.LogFileName) + } + + // Version-aware group replication init template + initNodesTmpl := globals.TmplInitNodes + isMySQL84, _ := common.GreaterOrEqualVersion(sandboxDef.Version, globals.MinimumResetBinaryLogsVersion) + if isMySQL84 { + initNodesTmpl = globals.TmplInitNodes84 + } + + for i := 1; i <= nodes; i++ { + groupPort := baseGroupPort + i + sandboxDef.Port = basePort + i + data["Nodes"] = append(data["Nodes"].([]common.StringMap), common.StringMap{ + "ShellPath": 
sandboxDef.ShellPath, + "Copyright": globals.ShellScriptCopyright, + "AppVersion": common.VersionDef, + "DateTime": timestamp.Format(time.UnixDate), + "Node": i, + "NodePort": sandboxDef.Port, + "MasterIp": masterIp, + "NodeLabel": nodeLabel, + "SlaveLabel": slaveLabel, + "SlaveAbbr": slaveAbbr, + "ChangeMasterExtra": changeMasterExtra, + "ChangeMasterTo": replCmds["ChangeMasterTo"], + "MasterUserParam": replCmds["MasterUserParam"], + "MasterPasswordParam": replCmds["MasterPasswordParam"], + "ResetMasterCmd": replCmds["ResetMasterCmd"], + "MasterLabel": masterLabel, + "MasterAbbr": masterAbbr, + "SandboxDir": sandboxDef.SandboxDir, + "StopNodeList": stopNodeList, + "RplUser": sandboxDef.RplUser, + "RplPassword": sandboxDef.RplPassword, + }) + + // Build replica list for init_cluster template (nodes 2..N) + if i > 1 { + data["Replicas"] = append(data["Replicas"].([]common.StringMap), common.StringMap{ + "Port": sandboxDef.Port, + }) + } + + sandboxDef.DirName = fmt.Sprintf("%s%d", nodeLabel, i) + sandboxDef.MorePorts = []int{groupPort} + sandboxDef.ServerId = setServerId(sandboxDef, i) + sbItem.Nodes = append(sbItem.Nodes, sandboxDef.DirName) + sbItem.Port = append(sbItem.Port, sandboxDef.Port) + sbDesc.Port = append(sbDesc.Port, sandboxDef.Port) + sbItem.Port = append(sbItem.Port, groupPort) + sbDesc.Port = append(sbDesc.Port, groupPort) + + if !sandboxDef.RunConcurrently { + installationMessage := "Installing and starting %s %d\n" + if sandboxDef.SkipStart { + installationMessage = "Installing %s %d\n" + } + common.CondPrintf(installationMessage, nodeLabel, i) + logger.Printf(installationMessage, nodeLabel, i) + } + + basePortText := fmt.Sprintf("%08d", basePort) + + // Version-aware options for group replication + useReplicaUpdates, _ := common.GreaterOrEqualVersion(sandboxDef.Version, globals.MinimumShowReplicaStatusVersion) + useNoWriteSetExtraction, _ := common.GreaterOrEqualVersion(sandboxDef.Version, globals.MinimumNoWriteSetExtractionVersion) + 
useMySQL84GroupOptions, _ := common.GreaterOrEqualVersion(sandboxDef.Version, globals.MinimumResetBinaryLogsVersion) + + replicationData := common.StringMap{ + "BasePort": basePortText, + "GroupSeeds": connectionString, + "LocalAddresses": fmt.Sprintf("%s:%d", masterIp, groupPort), + "PrimaryMode": "on", // InnoDB Cluster defaults to single-primary + "UseReplicaUpdates": useReplicaUpdates, + "SkipWriteSetExtraction": useNoWriteSetExtraction, + } + + // Use the same GR options templates as group replication + groupReplOptionsTmpl := globals.TmplGroupReplOptions + if useMySQL84GroupOptions { + groupReplOptionsTmpl = globals.TmplGroupReplOptions84 + } + replOptionsText, err := common.SafeTemplateFill("innodb_cluster_gr", + GroupTemplates[groupReplOptionsTmpl].Contents, replicationData) + if err != nil { + return err + } + sandboxDef.ReplOptions = SingleTemplates[globals.TmplReplicationOptions].Contents + "\n" + replOptionsText + + reMasterIp := regexp.MustCompile(`127\.0\.0\.1`) + sandboxDef.ReplOptions = reMasterIp.ReplaceAllString(sandboxDef.ReplOptions, masterIp) + + sandboxDef.ReplOptions += fmt.Sprintf("\n%s\n", SingleTemplates[globals.TmplGtidOptions57].Contents) + if useMySQL84GroupOptions { + sandboxDef.ReplOptions += fmt.Sprintf("\n%s\n", SingleTemplates[globals.TmplReplCrashSafeOptions84].Contents) + } else { + sandboxDef.ReplOptions += fmt.Sprintf("\n%s\n", SingleTemplates[globals.TmplReplCrashSafeOptions].Contents) + } + + // MySQLX port (required for InnoDB Cluster / MySQL Shell) + isMinimumMySQLXDefault, err := common.HasCapability(sandboxDef.Flavor, common.MySQLXDefault, sandboxDef.Version) + if err != nil { + return err + } + if isMinimumMySQLXDefault || sandboxDef.EnableMysqlX { + sandboxDef.MysqlXPort = baseMysqlxPort + i + if !sandboxDef.DisableMysqlX { + sbDesc.Port = append(sbDesc.Port, baseMysqlxPort+i) + sbItem.Port = append(sbItem.Port, baseMysqlxPort+i) + logger.Printf("adding mysqlx port %d to node %d\n", baseMysqlxPort+i, i) + } + } + if 
sandboxDef.EnableAdminAddress { + sandboxDef.AdminPort = baseAdminPort + i + sbDesc.Port = append(sbDesc.Port, baseAdminPort+i) + sbItem.Port = append(sbItem.Port, baseAdminPort+i) + logger.Printf("adding admin port %d to node %d\n", baseAdminPort+i, i) + } + + sandboxDef.Multi = true + sandboxDef.LoadGrants = true + sandboxDef.Prompt = fmt.Sprintf("%s%d", nodeLabel, i) + sandboxDef.SBType = "innodb-cluster-node" + sandboxDef.NodeNum = i + logger.Printf("Create single sandbox for node %d\n", i) + execList, err := CreateChildSandbox(sandboxDef) + if err != nil { + return fmt.Errorf(globals.ErrCreatingSandbox, err) + } + execLists = append(execLists, execList...) + + var dataNode = common.StringMap{ + "ShellPath": sandboxDef.ShellPath, + "Copyright": globals.ShellScriptCopyright, + "AppVersion": common.VersionDef, + "DateTime": timestamp.Format(time.UnixDate), + "Node": i, + "NodePort": sandboxDef.Port, + "NodeLabel": nodeLabel, + "MasterLabel": masterLabel, + "MasterAbbr": masterAbbr, + "ChangeMasterExtra": changeMasterExtra, + "ChangeMasterTo": replCmds["ChangeMasterTo"], + "MasterUserParam": replCmds["MasterUserParam"], + "MasterPasswordParam": replCmds["MasterPasswordParam"], + "ResetMasterCmd": replCmds["ResetMasterCmd"], + "SlaveLabel": slaveLabel, + "SlaveAbbr": slaveAbbr, + "SandboxDir": sandboxDef.SandboxDir, + } + logger.Printf("Create node script for node %d\n", i) + err = writeScript(logger, MultipleTemplates, fmt.Sprintf("n%d", i), globals.TmplNode, sandboxDef.SandboxDir, dataNode, true) + if err != nil { + return err + } + if sandboxDef.EnableAdminAddress { + err = writeScript(logger, MultipleTemplates, fmt.Sprintf("na%d", i), globals.TmplNodeAdmin, sandboxDef.SandboxDir, dataNode, true) + if err != nil { + return err + } + } + } + + logger.Printf("Writing sandbox description in %s\n", sandboxDef.SandboxDir) + err = common.WriteSandboxDescription(sandboxDef.SandboxDir, sbDesc) + if err != nil { + return errors.Wrapf(err, "unable to write sandbox 
description") + } + err = defaults.UpdateCatalog(sandboxDef.SandboxDir, sbItem) + if err != nil { + return errors.Wrapf(err, "unable to update catalog") + } + + slavePlural := english.PluralWord(2, slaveLabel, "") + masterPlural := english.PluralWord(2, masterLabel, "") + useAllMasters := "use_all_" + masterPlural + useAllSlaves := "use_all_" + slavePlural + execAllSlaves := "exec_all_" + slavePlural + execAllMasters := "exec_all_" + masterPlural + + logger.Printf("Writing InnoDB Cluster scripts\n") + sbMultiple := ScriptBatch{ + tc: MultipleTemplates, + logger: logger, + data: data, + sandboxDir: sandboxDef.SandboxDir, + scripts: []ScriptDef{ + {globals.ScriptStartAll, globals.TmplStartMulti, true}, + {globals.ScriptRestartAll, globals.TmplRestartMulti, true}, + {globals.ScriptStatusAll, globals.TmplStatusMulti, true}, + {globals.ScriptTestSbAll, globals.TmplTestSbMulti, true}, + {globals.ScriptStopAll, globals.TmplStopMulti, true}, + {globals.ScriptClearAll, globals.TmplClearMulti, true}, + {globals.ScriptSendKillAll, globals.TmplSendKillMulti, true}, + {globals.ScriptUseAll, globals.TmplUseMulti, true}, + {globals.ScriptMetadataAll, globals.TmplMetadataMulti, true}, + {globals.ScriptReplicateFrom, globals.TmplReplicateFromMulti, true}, + {globals.ScriptSysbench, globals.TmplSysbenchMulti, true}, + {globals.ScriptSysbenchReady, globals.TmplSysbenchReadyMulti, true}, + {globals.ScriptExecAll, globals.TmplExecMulti, true}, + }, + } + sbRepl := ScriptBatch{ + tc: ReplicationTemplates, + logger: logger, + data: data, + sandboxDir: sandboxDef.SandboxDir, + scripts: []ScriptDef{ + {useAllSlaves, globals.TmplMultiSourceUseSlaves, true}, + {useAllMasters, globals.TmplMultiSourceUseMasters, true}, + {execAllMasters, globals.TmplMultiSourceExecMasters, true}, + {execAllSlaves, globals.TmplMultiSourceExecSlaves, true}, + {globals.ScriptTestReplication, globals.TmplMultiSourceTest, true}, + {globals.ScriptWipeRestartAll, globals.TmplWipeAndRestartAll, true}, + }, + } + 
sbGroup := ScriptBatch{ + tc: GroupTemplates, + logger: logger, + data: data, + sandboxDir: sandboxDef.SandboxDir, + scripts: []ScriptDef{ + {globals.ScriptInitializeNodes, initNodesTmpl, true}, + {globals.ScriptCheckNodes, globals.TmplCheckNodes, true}, + }, + } + // InnoDB Cluster specific scripts + sbCluster := ScriptBatch{ + tc: InnoDBClusterTemplates, + logger: logger, + data: data, + sandboxDir: sandboxDef.SandboxDir, + scripts: []ScriptDef{ + {globals.ScriptInitCluster, globals.TmplInitCluster, true}, + {globals.ScriptCheckCluster, globals.TmplCheckCluster, true}, + }, + } + + if !skipRouter { + sbCluster.scripts = append(sbCluster.scripts, + ScriptDef{globals.ScriptRouterStart, globals.TmplRouterStart, true}, + ScriptDef{globals.ScriptRouterStop, globals.TmplRouterStop, true}, + ) + } + + for _, sb := range []ScriptBatch{sbMultiple, sbRepl, sbGroup, sbCluster} { + err := writeScripts(sb) + if err != nil { + return err + } + } + + if sandboxDef.EnableAdminAddress { + logger.Printf("Creating admin script for all nodes\n") + err = writeScript(logger, MultipleTemplates, globals.ScriptUseAllAdmin, + globals.TmplUseMultiAdmin, sandboxDef.SandboxDir, data, true) + if err != nil { + return err + } + } + + logger.Printf("Running parallel tasks\n") + concurrent.RunParallelTasksByPriority(execLists) + + if !sandboxDef.SkipStart { + // First, run the standard GR initialization + common.CondPrintln(path.Join(common.ReplaceLiteralHome(sandboxDef.SandboxDir), globals.ScriptInitializeNodes)) + logger.Printf("Running group replication initialization script\n") + _, err := common.RunCmd(path.Join(sandboxDef.SandboxDir, globals.ScriptInitializeNodes)) + if err != nil { + return fmt.Errorf("error initializing group replication for InnoDB Cluster: %s", err) + } + + // Then bootstrap the cluster via MySQL Shell + common.CondPrintln(path.Join(common.ReplaceLiteralHome(sandboxDef.SandboxDir), globals.ScriptInitCluster)) + logger.Printf("Running InnoDB Cluster initialization 
script\n") + _, err = common.RunCmd(path.Join(sandboxDef.SandboxDir, globals.ScriptInitCluster)) + if err != nil { + return fmt.Errorf("error initializing InnoDB Cluster: %s", err) + } + + // Bootstrap MySQL Router if requested + if !skipRouter && mysqlrouterPath != "" { + logger.Printf("Bootstrapping MySQL Router\n") + err = bootstrapRouter(mysqlrouterPath, routerDir, basePort+1, sandboxDef.DbPassword, logger) + if err != nil { + common.CondPrintf("WARNING: MySQL Router bootstrap failed: %s\n", err) + common.CondPrintln("The cluster is functional without Router. Use mysqlsh to connect directly.") + } + } + } + + common.CondPrintf("InnoDB Cluster directory installed in %s\n", common.ReplaceLiteralHome(sandboxDef.SandboxDir)) + common.CondPrintf("run 'dbdeployer usage multiple' for basic instructions\n") + return nil +} + +// bootstrapRouter bootstraps MySQL Router against the InnoDB Cluster. +func bootstrapRouter(mysqlrouterPath, routerDir string, primaryPort int, dbPassword string, logger *defaults.Logger) error { + err := os.MkdirAll(routerDir, globals.PublicDirectoryAttr) + if err != nil { + return fmt.Errorf("error creating router directory: %s", err) + } + + bootstrapURI := fmt.Sprintf("icadmin:icadmin@127.0.0.1:%d", primaryPort) + args := []string{ + "--bootstrap", bootstrapURI, + "--directory", routerDir, + "--force", + "--conf-use-sockets", + } + + logger.Printf("Running: %s %v\n", mysqlrouterPath, args) + _, err = common.RunCmdWithArgs(mysqlrouterPath, args) + if err != nil { + return fmt.Errorf("mysqlrouter bootstrap failed: %s", err) + } + + // Start the router + startScript := path.Join(routerDir, "start.sh") + if common.FileExists(startScript) { + _, err = common.RunCmd(startScript) + if err != nil { + return fmt.Errorf("error starting MySQL Router: %s", err) + } + common.CondPrintln("MySQL Router started") + } + + return nil +} diff --git a/sandbox/replication.go b/sandbox/replication.go index b43e3f4..6942f68 100644 --- a/sandbox/replication.go +++ 
b/sandbox/replication.go @@ -91,6 +91,7 @@ type ReplicationData struct { NdbNodes int MasterList string SlaveList string + SkipRouter bool } func setChangeMasterProperties(currentProperties string, moreProperties []string, logger *defaults.Logger) string { @@ -642,6 +643,16 @@ func CreateReplicationSandbox(sdef SandboxDef, origin string, replData Replicati common.IntSliceToDottedString(globals.MinimumNdbClusterVersion)) } sdef.SandboxDir = path.Join(sdef.SandboxDir, defaults.Defaults().NdbPrefix+common.VersionToName(origin)) + case globals.InnoDBClusterLabel: + isMinimumVersion, err := common.HasCapability(sdef.Flavor, common.MySQLXDefault, sdef.Version) + if err != nil { + return err + } + if !isMinimumVersion { + return fmt.Errorf(globals.ErrFeatureRequiresVersion, "InnoDB Cluster", + common.IntSliceToDottedString(globals.MinimumMysqlxDefaultVersion)) + } + sdef.SandboxDir = path.Join(sdef.SandboxDir, defaults.Defaults().InnoDBClusterPrefix+common.VersionToName(origin)) default: return fmt.Errorf("unrecognized topology. 
Accepted: '%v'", globals.AllowedTopologies) } @@ -674,6 +685,8 @@ func CreateReplicationSandbox(sdef SandboxDef, origin string, replData Replicati err = CreatePxcReplication(sdef, origin, replData.Nodes, replData.MasterIp) case globals.NdbLabel: err = CreateNdbReplication(sdef, origin, replData.Nodes, replData.NdbNodes, replData.MasterIp) + case globals.InnoDBClusterLabel: + err = CreateInnoDBCluster(sdef, origin, replData.Nodes, replData.MasterIp, replData.SkipRouter) } return err } diff --git a/sandbox/templates.go b/sandbox/templates.go index 6f70b24..64ab7f2 100644 --- a/sandbox/templates.go +++ b/sandbox/templates.go @@ -445,6 +445,7 @@ var ( "group": GroupTemplates, "pxc": PxcTemplates, "ndb": NdbTemplates, + "cluster": InnoDBClusterTemplates, } ) diff --git a/sandbox/templates/cluster/check_cluster.gotxt b/sandbox/templates/cluster/check_cluster.gotxt new file mode 100644 index 0000000..4783dd4 --- /dev/null +++ b/sandbox/templates/cluster/check_cluster.gotxt @@ -0,0 +1,8 @@ +#!{{.ShellPath}} +{{.Copyright}} +# Generated by dbdeployer {{.AppVersion}} using {{.TemplateName}} on {{.DateTime}} +MYSQLSH={{.MysqlShell}} +$MYSQLSH --uri icadmin:icadmin@127.0.0.1:{{.PrimaryPort}} --js -e " +var cluster = dba.getCluster(); +print(cluster.status()); +" diff --git a/sandbox/templates/cluster/init_cluster.gotxt b/sandbox/templates/cluster/init_cluster.gotxt new file mode 100644 index 0000000..1399656 --- /dev/null +++ b/sandbox/templates/cluster/init_cluster.gotxt @@ -0,0 +1,38 @@ +#!{{.ShellPath}} +{{.Copyright}} +# Generated by dbdeployer {{.AppVersion}} using {{.TemplateName}} on {{.DateTime}} +MYSQLSH={{.MysqlShell}} + +echo "Creating InnoDB Cluster..." 
+ +# Configure the first instance for cluster use +$MYSQLSH --uri root:{{.DbPassword}}@127.0.0.1:{{.PrimaryPort}} -- dba configure-instance --clusterAdmin=icadmin --clusterAdminPassword=icadmin --interactive=false --restart=false + +sleep 2 + +# Create the cluster on the primary +$MYSQLSH --uri icadmin:icadmin@127.0.0.1:{{.PrimaryPort}} --js -e " +var cluster = dba.createCluster('{{.ClusterName}}', {memberWeight: 90}); +print('Cluster created successfully'); +" + +{{range .Replicas}} +echo "Adding instance 127.0.0.1:{{.Port}}..." +# Configure each replica +$MYSQLSH --uri root:{{$.DbPassword}}@127.0.0.1:{{.Port}} -- dba configure-instance --clusterAdmin=icadmin --clusterAdminPassword=icadmin --interactive=false --restart=false + +sleep 2 + +# Add to cluster +$MYSQLSH --uri icadmin:icadmin@127.0.0.1:{{$.PrimaryPort}} --js -e " +var cluster = dba.getCluster(); +cluster.addInstance('icadmin:icadmin@127.0.0.1:{{.Port}}', {recoveryMethod: 'incremental'}); +" +sleep 3 +{{end}} + +echo "Checking cluster status..." 
+$MYSQLSH --uri icadmin:icadmin@127.0.0.1:{{.PrimaryPort}} --js -e " +var cluster = dba.getCluster(); +print(cluster.status()); +" diff --git a/sandbox/templates/cluster/innodb_cluster_options.gotxt b/sandbox/templates/cluster/innodb_cluster_options.gotxt new file mode 100644 index 0000000..61621b9 --- /dev/null +++ b/sandbox/templates/cluster/innodb_cluster_options.gotxt @@ -0,0 +1,10 @@ +# InnoDB Cluster options +# Generated by dbdeployer {{.AppVersion}} +server-id={{.ServerId}} +gtid_mode=ON +enforce_gtid_consistency=ON +binlog_checksum=NONE +log_replica_updates=ON +report_host={{.MasterIp}} +report_port={{.Port}} +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" diff --git a/sandbox/templates/cluster/router_start.gotxt b/sandbox/templates/cluster/router_start.gotxt new file mode 100644 index 0000000..2891a69 --- /dev/null +++ b/sandbox/templates/cluster/router_start.gotxt @@ -0,0 +1,11 @@ +#!{{.ShellPath}} +{{.Copyright}} +# Generated by dbdeployer {{.AppVersion}} using {{.TemplateName}} on {{.DateTime}} +ROUTER_DIR={{.RouterDir}} +if [ -f $ROUTER_DIR/start.sh ]; then + $ROUTER_DIR/start.sh + echo "MySQL Router started" +else + echo "MySQL Router not bootstrapped" + exit 1 +fi diff --git a/sandbox/templates/cluster/router_stop.gotxt b/sandbox/templates/cluster/router_stop.gotxt new file mode 100644 index 0000000..f9671a0 --- /dev/null +++ b/sandbox/templates/cluster/router_stop.gotxt @@ -0,0 +1,13 @@ +#!{{.ShellPath}} +{{.Copyright}} +# Generated by dbdeployer {{.AppVersion}} using {{.TemplateName}} on {{.DateTime}} +ROUTER_DIR={{.RouterDir}} +if [ -f $ROUTER_DIR/stop.sh ]; then + $ROUTER_DIR/stop.sh + echo "MySQL Router stopped" +elif [ -f $ROUTER_DIR/mysqlrouter.pid ]; then + kill $(cat $ROUTER_DIR/mysqlrouter.pid) 2>/dev/null + echo "MySQL Router stopped" +else + echo "MySQL Router not running" +fi From f14b2f4ab9f22d14d37fd79ced32b7bef0da45d8 Mon Sep 17 00:00:00 2001 From: Rene Cannao Date: Thu, 2 Apr 2026 09:00:40 +0000 Subject: [PATCH 
2/3] ci: add InnoDB Cluster integration tests (8.4.8 + 9.5.0) Tests three deployment modes for each MySQL version: 1. InnoDB Cluster with MySQL Router (full stack) 2. InnoDB Cluster with --skip-router (cluster only) 3. InnoDB Cluster with ProxySQL instead of Router Downloads MySQL Shell and MySQL Router tarballs, installs them into the MySQL basedir alongside the server binaries. --- .github/workflows/integration_tests.yml | 136 ++++++++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index b837b40..d4df6b4 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -191,6 +191,142 @@ jobs: done 2>/dev/null || true pkill -9 -u "$USER" postgres 2>/dev/null || true + # Test InnoDB Cluster topology with MySQL Shell + MySQL Router + innodb-cluster-test: + name: InnoDB Cluster (${{ matrix.mysql-version }}) + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + mysql-version: + - '8.4.8' + - '9.5.0' + env: + GO111MODULE: on + SANDBOX_BINARY: ${{ github.workspace }}/opt/mysql + MYSQL_VERSION: ${{ matrix.mysql-version }} + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version: '1.22' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y libaio1 libnuma1 libncurses5 + + - name: Build dbdeployer + run: go build -o dbdeployer . + + - name: Cache MySQL tarball + uses: actions/cache@v4 + with: + path: /tmp/mysql-tarball + key: mysql-${{ matrix.mysql-version }}-linux-x86_64-v2 + + - name: Download MySQL Server + env: + SHORT_VER_ENV: ${{ matrix.mysql-version }} + run: | + SHORT_VER="${SHORT_VER_ENV%.*}" + TARBALL="mysql-${MYSQL_VERSION}-linux-glibc2.17-x86_64.tar.xz" + mkdir -p /tmp/mysql-tarball + if [ ! 
-f "/tmp/mysql-tarball/$TARBALL" ]; then + curl -L -f -o "/tmp/mysql-tarball/$TARBALL" \ + "https://dev.mysql.com/get/Downloads/MySQL-${SHORT_VER}/$TARBALL" + fi + mkdir -p "$SANDBOX_BINARY" + ./dbdeployer unpack "/tmp/mysql-tarball/$TARBALL" --sandbox-binary="$SANDBOX_BINARY" + + - name: Download and install MySQL Shell + env: + SHORT_VER_ENV: ${{ matrix.mysql-version }} + run: | + SHORT_VER="${SHORT_VER_ENV%.*}" + SHELL_TARBALL="mysql-shell-${MYSQL_VERSION}-linux-glibc2.17-x86-64bit.tar.gz" + echo "Downloading MySQL Shell ${MYSQL_VERSION}..." + curl -L -f -o "/tmp/$SHELL_TARBALL" \ + "https://dev.mysql.com/get/Downloads/MySQL-Shell-${SHORT_VER}/$SHELL_TARBALL" + tar xzf "/tmp/$SHELL_TARBALL" -C /tmp/ + SHELL_DIR=$(ls -d /tmp/mysql-shell-${MYSQL_VERSION}* | head -1) + cp "$SHELL_DIR/bin/mysqlsh" "$SANDBOX_BINARY/${MYSQL_VERSION}/bin/" + echo "mysqlsh installed at $SANDBOX_BINARY/${MYSQL_VERSION}/bin/mysqlsh" + + - name: Download and install MySQL Router + env: + SHORT_VER_ENV: ${{ matrix.mysql-version }} + run: | + SHORT_VER="${SHORT_VER_ENV%.*}" + ROUTER_TARBALL="mysql-router-${MYSQL_VERSION}-linux-glibc2.17-x86_64.tar.xz" + echo "Downloading MySQL Router ${MYSQL_VERSION}..." + curl -L -f -o "/tmp/$ROUTER_TARBALL" \ + "https://dev.mysql.com/get/Downloads/MySQL-Router-${SHORT_VER}/$ROUTER_TARBALL" + tar xJf "/tmp/$ROUTER_TARBALL" -C /tmp/ + ROUTER_DIR=$(ls -d /tmp/mysql-router-${MYSQL_VERSION}* | head -1) + cp "$ROUTER_DIR/bin/mysqlrouter" "$SANDBOX_BINARY/${MYSQL_VERSION}/bin/" + cp -r "$ROUTER_DIR/lib/." 
"$SANDBOX_BINARY/${MYSQL_VERSION}/lib/" 2>/dev/null || true + echo "mysqlrouter installed at $SANDBOX_BINARY/${MYSQL_VERSION}/bin/mysqlrouter" + + - name: Test InnoDB Cluster with MySQL Router + run: | + echo "=== Deploying InnoDB Cluster ${MYSQL_VERSION} with Router ===" + ./dbdeployer deploy replication "$MYSQL_VERSION" \ + --topology=innodb-cluster \ + --sandbox-binary="$SANDBOX_BINARY" \ + --nodes=3 + echo "=== Check cluster status ===" + ~/sandboxes/ic_msb_*/check_cluster + echo "=== Cleanup ===" + ./dbdeployer delete all --skip-confirm + + - name: Test InnoDB Cluster with --skip-router + run: | + echo "=== Deploying InnoDB Cluster ${MYSQL_VERSION} without Router ===" + ./dbdeployer deploy replication "$MYSQL_VERSION" \ + --topology=innodb-cluster \ + --skip-router \ + --sandbox-binary="$SANDBOX_BINARY" \ + --nodes=3 + echo "=== Check cluster status ===" + ~/sandboxes/ic_msb_*/check_cluster + echo "=== Cleanup ===" + ./dbdeployer delete all --skip-confirm + + - name: Test InnoDB Cluster with ProxySQL (instead of Router) + run: | + # Install ProxySQL + PROXYSQL_VERSION="3.0.6" + wget -nv -O /tmp/proxysql.deb \ + "https://github.com/sysown/proxysql/releases/download/v${PROXYSQL_VERSION}/proxysql_${PROXYSQL_VERSION}-ubuntu22_amd64.deb" + mkdir -p /tmp/proxysql-extract + dpkg-deb -x /tmp/proxysql.deb /tmp/proxysql-extract + sudo cp /tmp/proxysql-extract/usr/bin/proxysql /usr/local/bin/proxysql + sudo chmod +x /usr/local/bin/proxysql + + echo "=== Deploying InnoDB Cluster ${MYSQL_VERSION} + ProxySQL (no Router) ===" + ./dbdeployer deploy replication "$MYSQL_VERSION" \ + --topology=innodb-cluster \ + --skip-router \ + --with-proxysql \ + --sandbox-binary="$SANDBOX_BINARY" \ + --nodes=3 + echo "=== Check cluster ===" + ~/sandboxes/ic_msb_*/check_cluster + echo "=== Check ProxySQL is running ===" + ~/sandboxes/ic_msb_*/proxysql/status + echo "=== Query ProxySQL servers ===" + ~/sandboxes/ic_msb_*/proxysql/use -e "SELECT * FROM runtime_mysql_servers;" || true + + - 
name: Cleanup + if: always() + run: | + ./dbdeployer delete all --skip-confirm 2>/dev/null || true + pkill -9 -u "$USER" mysqld 2>/dev/null || true + pkill -9 -u "$USER" mysqlrouter 2>/dev/null || true + pkill -9 -u "$USER" proxysql 2>/dev/null || true + # Test the "downloads get-by-version" + "unpack" flow that users follow # from the quickstart guide. This catches registry gaps and download issues. downloads-test: From f3c7ba98c0177e1a3dbb189aed8db0f6110af143 Mon Sep 17 00:00:00 2001 From: Rene Cannao Date: Thu, 2 Apr 2026 09:18:05 +0000 Subject: [PATCH 3/3] ci: add functional verification to all integration tests Every non-trivial CI job now verifies data flow, not just deployment: InnoDB Cluster + Router: - Write on primary, verify replication to node2 and node3 - Write through MySQL Router R/W port, verify replicated InnoDB Cluster + skip-router: - Write on primary, verify replication to node3 InnoDB Cluster + ProxySQL: - Verify ProxySQL has ONLINE backends - Write through ProxySQL, verify replicated to node2 ProxySQL + PostgreSQL: - Write on primary, verify replication to replica1 - ProxySQL proxy connection test (WIP, non-fatal) MySQL replication and ProxySQL+MySQL already had functional tests (check_slaves, test_replication, proxysql-integration-tests.sh). --- .github/workflows/integration_tests.yml | 97 +++++++++++++++---- .../workflows/proxysql_integration_tests.yml | 22 ++++- 2 files changed, 94 insertions(+), 25 deletions(-) diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index d4df6b4..77b424b 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -269,55 +269,112 @@ jobs: cp -r "$ROUTER_DIR/lib/." 
"$SANDBOX_BINARY/${MYSQL_VERSION}/lib/" 2>/dev/null || true echo "mysqlrouter installed at $SANDBOX_BINARY/${MYSQL_VERSION}/bin/mysqlrouter" + - name: Install ProxySQL + run: | + PROXYSQL_VERSION="3.0.6" + wget -nv -O /tmp/proxysql.deb \ + "https://github.com/sysown/proxysql/releases/download/v${PROXYSQL_VERSION}/proxysql_${PROXYSQL_VERSION}-ubuntu22_amd64.deb" + mkdir -p /tmp/proxysql-extract + dpkg-deb -x /tmp/proxysql.deb /tmp/proxysql-extract + sudo cp /tmp/proxysql-extract/usr/bin/proxysql /usr/local/bin/proxysql + sudo chmod +x /usr/local/bin/proxysql + - name: Test InnoDB Cluster with MySQL Router run: | - echo "=== Deploying InnoDB Cluster ${MYSQL_VERSION} with Router ===" + echo "=== Deploy InnoDB Cluster ${MYSQL_VERSION} with Router ===" ./dbdeployer deploy replication "$MYSQL_VERSION" \ --topology=innodb-cluster \ --sandbox-binary="$SANDBOX_BINARY" \ --nodes=3 - echo "=== Check cluster status ===" + + echo "=== Verify cluster status ===" ~/sandboxes/ic_msb_*/check_cluster + + echo "=== Functional test: write on primary, read on all nodes ===" + # Find the primary node (node1) + SBDIR=$(ls -d ~/sandboxes/ic_msb_*) + $SBDIR/node1/use -e "CREATE DATABASE ic_test; USE ic_test; CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY, val VARCHAR(100)); INSERT INTO t1 (val) VALUES ('hello_from_primary');" + sleep 3 + echo "--- Read from node2 (should see replicated data) ---" + RESULT=$($SBDIR/node2/use -e "SELECT val FROM ic_test.t1;" 2>&1) + echo "$RESULT" + echo "$RESULT" | grep -q "hello_from_primary" || { echo "FAIL: data not replicated to node2"; exit 1; } + echo "--- Read from node3 ---" + RESULT=$($SBDIR/node3/use -e "SELECT val FROM ic_test.t1;" 2>&1) + echo "$RESULT" + echo "$RESULT" | grep -q "hello_from_primary" || { echo "FAIL: data not replicated to node3"; exit 1; } + + echo "=== Functional test: connect through MySQL Router ===" + ROUTER_RW_PORT=$(ls $SBDIR/router/mysqlrouter.conf 2>/dev/null && grep -A5 '\[routing:bootstrap_rw\]' 
$SBDIR/router/mysqlrouter.conf | grep 'bind_port' | awk -F= '{print $2}' | tr -d ' ' || echo "") + if [ -n "$ROUTER_RW_PORT" ]; then + echo "Router R/W port: $ROUTER_RW_PORT" + $SBDIR/node1/use -h 127.0.0.1 -P "$ROUTER_RW_PORT" -e "INSERT INTO ic_test.t1 (val) VALUES ('via_router');" + sleep 2 + RESULT=$($SBDIR/node2/use -e "SELECT val FROM ic_test.t1 WHERE val='via_router';" 2>&1) + echo "$RESULT" + echo "$RESULT" | grep -q "via_router" || { echo "FAIL: write through Router not replicated"; exit 1; } + echo "OK: Router R/W connection works and replication verified" + else + echo "WARN: Could not determine Router R/W port, skipping Router connection test" + fi + echo "=== Cleanup ===" ./dbdeployer delete all --skip-confirm - - name: Test InnoDB Cluster with --skip-router + - name: Test InnoDB Cluster with --skip-router + write/read verification run: | - echo "=== Deploying InnoDB Cluster ${MYSQL_VERSION} without Router ===" + echo "=== Deploy InnoDB Cluster ${MYSQL_VERSION} without Router ===" ./dbdeployer deploy replication "$MYSQL_VERSION" \ --topology=innodb-cluster \ --skip-router \ --sandbox-binary="$SANDBOX_BINARY" \ --nodes=3 - echo "=== Check cluster status ===" + + echo "=== Verify cluster status ===" ~/sandboxes/ic_msb_*/check_cluster + + echo "=== Functional test: write/read across cluster ===" + SBDIR=$(ls -d ~/sandboxes/ic_msb_*) + $SBDIR/node1/use -e "CREATE DATABASE skiprt_test; USE skiprt_test; CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY, msg TEXT); INSERT INTO t1 (msg) VALUES ('skip_router_test');" + sleep 3 + RESULT=$($SBDIR/node3/use -e "SELECT msg FROM skiprt_test.t1;" 2>&1) + echo "$RESULT" + echo "$RESULT" | grep -q "skip_router_test" || { echo "FAIL: data not replicated"; exit 1; } + echo "OK: InnoDB Cluster replication works without Router" + echo "=== Cleanup ===" ./dbdeployer delete all --skip-confirm - name: Test InnoDB Cluster with ProxySQL (instead of Router) run: | - # Install ProxySQL - PROXYSQL_VERSION="3.0.6" - wget -nv -O 
/tmp/proxysql.deb \ - "https://github.com/sysown/proxysql/releases/download/v${PROXYSQL_VERSION}/proxysql_${PROXYSQL_VERSION}-ubuntu22_amd64.deb" - mkdir -p /tmp/proxysql-extract - dpkg-deb -x /tmp/proxysql.deb /tmp/proxysql-extract - sudo cp /tmp/proxysql-extract/usr/bin/proxysql /usr/local/bin/proxysql - sudo chmod +x /usr/local/bin/proxysql - - echo "=== Deploying InnoDB Cluster ${MYSQL_VERSION} + ProxySQL (no Router) ===" + echo "=== Deploy InnoDB Cluster ${MYSQL_VERSION} + ProxySQL ===" ./dbdeployer deploy replication "$MYSQL_VERSION" \ --topology=innodb-cluster \ --skip-router \ --with-proxysql \ --sandbox-binary="$SANDBOX_BINARY" \ --nodes=3 - echo "=== Check cluster ===" + + echo "=== Verify cluster status ===" ~/sandboxes/ic_msb_*/check_cluster - echo "=== Check ProxySQL is running ===" - ~/sandboxes/ic_msb_*/proxysql/status - echo "=== Query ProxySQL servers ===" - ~/sandboxes/ic_msb_*/proxysql/use -e "SELECT * FROM runtime_mysql_servers;" || true + + echo "=== Verify ProxySQL sees the backend servers ===" + SBDIR=$(ls -d ~/sandboxes/ic_msb_*) + SERVERS=$($SBDIR/proxysql/use -e "SELECT hostname, port, hostgroup_id, status FROM runtime_mysql_servers;" 2>&1) + echo "$SERVERS" + # Verify at least 2 servers are ONLINE + ONLINE_COUNT=$(echo "$SERVERS" | grep -c "ONLINE" || true) + echo "Online servers: $ONLINE_COUNT" + [ "$ONLINE_COUNT" -ge 2 ] || { echo "FAIL: expected at least 2 ONLINE servers in ProxySQL"; exit 1; } + + echo "=== Functional test: write through ProxySQL ===" + $SBDIR/proxysql/use_proxy -e "CREATE DATABASE proxy_ic_test; USE proxy_ic_test; CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY, val VARCHAR(100)); INSERT INTO t1 (val) VALUES ('via_proxysql');" + sleep 3 + echo "--- Verify on node2 directly ---" + RESULT=$($SBDIR/node2/use -e "SELECT val FROM proxy_ic_test.t1;" 2>&1) + echo "$RESULT" + echo "$RESULT" | grep -q "via_proxysql" || { echo "FAIL: write through ProxySQL not replicated"; exit 1; } + echo "OK: ProxySQL -> InnoDB Cluster 
write + replication verified" - name: Cleanup if: always() diff --git a/.github/workflows/proxysql_integration_tests.yml b/.github/workflows/proxysql_integration_tests.yml index e3a898f..c202d44 100644 --- a/.github/workflows/proxysql_integration_tests.yml +++ b/.github/workflows/proxysql_integration_tests.yml @@ -136,12 +136,24 @@ jobs: PG_FULL=$(ls ~/opt/postgresql/ | head -1) echo "=== Deploying PostgreSQL $PG_FULL replication + ProxySQL ===" ./dbdeployer deploy replication "$PG_FULL" --provider=postgresql --with-proxysql + SBDIR=$(ls -d ~/sandboxes/postgresql_repl_*) + echo "=== Check ProxySQL is running ===" - ~/sandboxes/postgresql_repl_*/proxysql/status - echo "=== Check ProxySQL admin interface ===" - ~/sandboxes/postgresql_repl_*/proxysql/use -e "SELECT * FROM pgsql_servers;" || true - echo "=== Connect through ProxySQL (may fail - pgsql auth config is WIP) ===" - ~/sandboxes/postgresql_repl_*/proxysql/use_proxy -c "SELECT 1;" || echo "WARN: ProxySQL pgsql proxy connection failed (expected - auth config WIP)" + $SBDIR/proxysql/status + + echo "=== Check ProxySQL has pgsql_servers configured ===" + $SBDIR/proxysql/use -e "SELECT * FROM pgsql_servers;" || true + + echo "=== Functional test: verify PostgreSQL replication works ===" + $SBDIR/primary/use -c "CREATE TABLE proxy_test(id serial, val text); INSERT INTO proxy_test(val) VALUES ('pg_proxysql_test');" + sleep 2 + RESULT=$($SBDIR/replica1/use -c "SELECT val FROM proxy_test;" 2>&1) + echo "$RESULT" + echo "$RESULT" | grep -q "pg_proxysql_test" || { echo "FAIL: PG replication not working"; exit 1; } + echo "OK: PostgreSQL replication verified with ProxySQL deployed" + + echo "=== ProxySQL proxy connection test (pgsql auth WIP) ===" + $SBDIR/proxysql/use_proxy -c "SELECT 1;" || echo "WARN: ProxySQL pgsql proxy connection failed (expected - auth config WIP)" - name: Cleanup if: always()