diff --git a/go/agent/agent_dao.go b/go/agent/agent_dao.go index 655aaf99..bd990ad6 100644 --- a/go/agent/agent_dao.go +++ b/go/agent/agent_dao.go @@ -21,7 +21,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "strings" @@ -87,9 +87,9 @@ func readResponse(res *http.Response, err error) ([]byte, error) { if err != nil { return nil, err } - defer res.Body.Close() + defer func() { _ = res.Body.Close() }() - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) if err != nil { return nil, err } @@ -120,7 +120,7 @@ func SubmitAgent(hostname string, port int, token string) (string, error) { } // Try to discover topology instances when an agent submits - go DiscoverAgentInstance(hostname, port) + go func() { _ = DiscoverAgentInstance(hostname, port) }() return hostname, err } @@ -129,18 +129,18 @@ func SubmitAgent(hostname string, port int, token string) (string, error) { func DiscoverAgentInstance(hostname string, port int) error { agent, err := GetAgent(hostname) if err != nil { - log.Errorf("Couldn't get agent for %s: %v", hostname, err) + _ = log.Errorf("Couldn't get agent for %s: %v", hostname, err) return err } instanceKey := agent.GetInstance() instance, err := inst.ReadTopologyInstance(instanceKey) if err != nil { - log.Errorf("Failed to read topology for %v. err=%+v", instanceKey, err) + _ = log.Errorf("Failed to read topology for %v. 
err=%+v", instanceKey, err) return err } if instance == nil { - log.Errorf("Failed to read topology for %v", instanceKey) + _ = log.Errorf("Failed to read topology for %v", instanceKey) return err } log.Infof("Discovered Agent Instance: %v", instance.Key) @@ -177,7 +177,7 @@ func ReadOutdatedAgentsHosts() ([]string, error) { }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err } @@ -210,7 +210,7 @@ func ReadAgents() ([]Agent, error) { }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err @@ -322,7 +322,7 @@ func GetAgent(hostname string) (Agent, error) { err = json.Unmarshal(body, &agent.AvailableLocalSnapshots) } if err != nil { - log.Errore(err) + _ = log.Errore(err) } } { @@ -359,7 +359,7 @@ func GetAgent(hostname string) (Agent, error) { mySQLRunningUri := fmt.Sprintf("%s/mysql-status?token=%s", uri, token) body, err := readResponse(httpGet(mySQLRunningUri)) if err == nil { - err = json.Unmarshal(body, &agent.MySQLRunning) + _ = json.Unmarshal(body, &agent.MySQLRunning) } // Actually an error is OK here since "status" returns with non-zero exit code when MySQL not running } @@ -434,7 +434,7 @@ func executeAgentCommandWithMethodFunc(hostname string, command string, methodFu if onResponse != nil { (*onResponse)(body) } - auditAgentOperation("agent-command", &agent, command) + _ = auditAgentOperation("agent-command", &agent, command) return agent, err } @@ -519,7 +519,7 @@ func CustomCommand(hostname string, cmd string) (output string, err error) { func seedCommandCompleted(hostname string, seedId int64) (Agent, bool, error) { result := false onResponse := func(body []byte) { - json.Unmarshal(body, &result) + _ = json.Unmarshal(body, &result) } agent, err := executeAgentCommand(hostname, fmt.Sprintf("seed-command-completed/%d", seedId), &onResponse) return agent, result, err @@ -529,7 +529,7 @@ func seedCommandCompleted(hostname string, seedId int64) (Agent, bool, error) { func seedCommandSucceeded(hostname string, 
seedId int64) (Agent, bool, error) { result := false onResponse := func(body []byte) { - json.Unmarshal(body, &result) + _ = json.Unmarshal(body, &result) } agent, err := executeAgentCommand(hostname, fmt.Sprintf("seed-command-succeeded/%d", seedId), &onResponse) return agent, result, err @@ -543,7 +543,7 @@ func AbortSeed(seedId int64) error { } for _, seedOperation := range seedOperations { - AbortSeedCommand(seedOperation.TargetHostname, seedId) + _, _ = AbortSeedCommand(seedOperation.TargetHostname, seedId) AbortSeedCommand(seedOperation.SourceHostname, seedId) } updateSeedComplete(seedId, errors.New("Aborted")) @@ -702,8 +702,8 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err if err != nil { return updateSeedStateEntry(seedStateId, err) } - sourceAgent, err = GetAgent(sourceHostname) - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("MySQL data volume on source host %s is %d bytes", sourceHostname, sourceAgent.MountPoint.MySQLDiskUsage), "") + sourceAgent, _ = GetAgent(sourceHostname) + _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("MySQL data volume on source host %s is %d bytes", sourceHostname, sourceAgent.MountPoint.MySQLDiskUsage), "") seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Erasing MySQL data on %s", targetHostname), "") _, err = deleteMySQLDatadir(targetHostname) @@ -719,14 +719,14 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err if sourceAgent.MountPoint.MySQLDiskUsage > targetAgent.MySQLDatadirDiskFree { Unmount(sourceHostname) - return updateSeedStateEntry(seedStateId, fmt.Errorf("Not enough disk space on target host %s. Required: %d, available: %d. 
Bailing out.", targetHostname, sourceAgent.MountPoint.MySQLDiskUsage, targetAgent.MySQLDatadirDiskFree)) + return updateSeedStateEntry(seedStateId, fmt.Errorf("not enough disk space on target host %s, required: %d, available: %d, bailing out", targetHostname, sourceAgent.MountPoint.MySQLDiskUsage, targetAgent.MySQLDatadirDiskFree)) } // ... - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s will now receive data in background", targetHostname), "") + _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s will now receive data in background", targetHostname), "") ReceiveMySQLSeedData(targetHostname, seedId) - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Waiting %d seconds for %s to start listening for incoming data", config.Config.SeedWaitSecondsBeforeSend, targetHostname), "") + _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Waiting %d seconds for %s to start listening for incoming data", config.Config.SeedWaitSecondsBeforeSend, targetHostname), "") time.Sleep(time.Duration(config.Config.SeedWaitSecondsBeforeSend) * time.Second) seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("%s will now send data to %s in background", sourceHostname, targetHostname), "") @@ -762,7 +762,7 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err AbortSeedCommand(sourceHostname, seedId) AbortSeedCommand(targetHostname, seedId) Unmount(sourceHostname) - return updateSeedStateEntry(seedStateId, errors.New("10 iterations have passed without progress. 
Bailing out.")) + return updateSeedStateEntry(seedStateId, errors.New("10 iterations have passed without progress, bailing out")) } var copyPct int64 = 0 @@ -795,10 +795,10 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err return updateSeedStateEntry(seedStateId, err) } - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Submitting MySQL instance for discovery: %s", targetHostname), "") + _, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Submitting MySQL instance for discovery: %s", targetHostname), "") SeededAgents <- &targetAgent - seedStateId, _ = submitSeedStateEntry(seedId, "Done", "") + _, _ = submitSeedStateEntry(seedId, "Done", "") return nil } diff --git a/go/agent/instance_topology_agent.go b/go/agent/instance_topology_agent.go index a81d6475..08225ae6 100644 --- a/go/agent/instance_topology_agent.go +++ b/go/agent/instance_topology_agent.go @@ -30,7 +30,7 @@ func SyncReplicaRelayLogs(instance, otherInstance *inst.Instance) (*inst.Instanc var nextCoordinates *inst.BinlogCoordinates var content string onResponse := func(contentBytes []byte) { - json.Unmarshal(contentBytes, &content) + _ = json.Unmarshal(contentBytes, &content) } log.Debugf("SyncReplicaRelayLogs: stopping replication") @@ -72,7 +72,7 @@ Cleanup: return instance, log.Errore(err) } // and we're done (pending deferred functions) - inst.AuditOperation("align-via-relaylogs", &instance.Key, fmt.Sprintf("aligned %+v by relaylogs from %+v", instance.Key, otherInstance.Key)) + _ = inst.AuditOperation("align-via-relaylogs", &instance.Key, fmt.Sprintf("aligned %+v by relaylogs from %+v", instance.Key, otherInstance.Key)) return instance, err } diff --git a/go/app/cli.go b/go/app/cli.go index 073f3948..3fd35d2a 100644 --- a/go/app/cli.go +++ b/go/app/cli.go @@ -121,18 +121,13 @@ func getClusterName(clusterAlias string, instanceKey *inst.InstanceKey) (cluster return clusterName } -func assignThisInstanceKey() *inst.InstanceKey { - log.Debugf("Assuming instance 
is this machine, %+v", thisInstanceKey) - return thisInstanceKey -} - func validateInstanceIsFound(instanceKey *inst.InstanceKey) (instance *inst.Instance) { instance, _, err := inst.ReadInstance(instanceKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) + _ = log.Fatalf("Instance not found: %+v", *instanceKey) } return instance } @@ -141,7 +136,7 @@ func validateInstanceIsFound(instanceKey *inst.InstanceKey) (instance *inst.Inst // to take multiple instance names separated by a comma or whitespace. func CliWrapper(command string, strict bool, instances string, destination string, owner string, reason string, duration string, pattern string, clusterAlias string, pool string, hostnameFlag string) { if config.Config.RaftEnabled && !*config.RuntimeCLIFlags.IgnoreRaftSetup { - log.Fatalf(`Orchestrator configured to run raft ("RaftEnabled": true). All access must go through the web API of the active raft node. You may use the orchestrator-client script which has a similar interface to the command line invocation. You may override this with --ignore-raft-setup`) + _ = log.Fatalf(`Orchestrator configured to run raft ("RaftEnabled": true). All access must go through the web API of the active raft node. You may use the orchestrator-client script which has a similar interface to the command line invocation. 
You may override this with --ignore-raft-setup`) } r := regexp.MustCompile(`[ ,\r\n\t]+`) tokens := r.Split(instances, -1) @@ -204,7 +199,7 @@ func Cli(command string, strict bool, instance string, destination string, owner // get os username as owner usr, err := user.Current() if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } owner = usr.Username } @@ -223,26 +218,26 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } _, err := inst.RelocateBelow(instanceKey, destinationKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) } case registerCliCommand("relocate-replicas", "Smart relocation", `Relocates all or part of the replicas of a given instance under another instance`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } - replicas, _, err, errs := inst.RelocateReplicas(instanceKey, destinationKey, pattern) + replicas, _, errs, err := inst.RelocateReplicas(instanceKey, destinationKey, pattern) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } else { for _, e := range errs { - log.Errore(e) + _ = log.Errore(e) } for _, replica := range replicas { fmt.Println(replica.Key.DisplayString()) @@ -253,11 +248,11 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot 
deduce instance:", instance) } _, _, err := inst.TakeSiblings(instanceKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } @@ -265,7 +260,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } validateInstanceIsFound(instanceKey) @@ -274,12 +269,12 @@ func Cli(command string, strict bool, instance string, destination string, owner postponedFunctionsContainer.Wait() if promotedReplica == nil { - log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) + _ = log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) } - fmt.Println(fmt.Sprintf("%s lost: %d, trivial: %d, pseudo-gtid: %d", - promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas))) + fmt.Printf("%s lost: %d, trivial: %d, pseudo-gtid: %d\n", + promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas)) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } } // General replication commands @@ -289,23 +284,23 @@ func Cli(command string, strict bool, instance string, destination string, owner instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) instance, err := inst.MoveUp(instanceKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), instance.MasterKey.DisplayString()) } case registerCliCommand("move-up-replicas", "Classic file:pos relocation", `Moves replicas of the given instance one level up the topology`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce 
instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } - movedReplicas, _, err, errs := inst.MoveUpReplicas(instanceKey, pattern) + movedReplicas, _, errs, err := inst.MoveUpReplicas(instanceKey, pattern) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } else { for _, e := range errs { - log.Errore(e) + _ = log.Errore(e) } for _, replica := range movedReplicas { fmt.Println(replica.Key.DisplayString()) @@ -316,25 +311,25 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination/sibling:", destination) + _ = log.Fatal("Cannot deduce destination/sibling:", destination) } _, err := inst.MoveBelow(instanceKey, destinationKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) } case registerCliCommand("move-equivalent", "Classic file:pos relocation", `Moves a replica beneath another server, based on previously recorded "equivalence coordinates"`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } _, err := inst.MoveEquivalent(instanceKey, destinationKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) } case registerCliCommand("repoint", "Classic file:pos relocation", `Make the given instance replicate from another instance without changing the binglog coordinates. 
Use with care`): { @@ -342,19 +337,19 @@ func Cli(command string, strict bool, instance string, destination string, owner // destinationKey can be null, in which case the instance repoints to its existing master instance, err := inst.Repoint(instanceKey, destinationKey, inst.GTIDHintNeutral) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), instance.MasterKey.DisplayString()) } case registerCliCommand("repoint-replicas", "Classic file:pos relocation", `Repoint all replicas of given instance to replicate back from the instance. Use with care`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) - repointedReplicas, err, errs := inst.RepointReplicasTo(instanceKey, pattern, destinationKey) + repointedReplicas, errs, err := inst.RepointReplicasTo(instanceKey, pattern, destinationKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } else { for _, e := range errs { - log.Errore(e) + _ = log.Errore(e) } for _, replica := range repointedReplicas { fmt.Println(fmt.Sprintf("%s<%s", replica.Key.DisplayString(), instanceKey.DisplayString())) @@ -365,11 +360,11 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } _, err := inst.TakeMaster(instanceKey, false) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) } @@ -378,7 +373,7 @@ func Cli(command string, strict bool, instance string, destination string, owner instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) _, err := inst.MakeCoMaster(instanceKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } fmt.Println(instanceKey.DisplayString()) 
} @@ -386,12 +381,12 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } instance, _, _, _, _, err := inst.GetCandidateReplica(instanceKey, false) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } else { fmt.Println(instance.Key.DisplayString()) } @@ -400,17 +395,17 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } validateInstanceIsFound(instanceKey) _, promotedBinlogServer, err := inst.RegroupReplicasBinlogServers(instanceKey, false) if promotedBinlogServer == nil { - log.Fatalf("Could not regroup binlog server replicas of %+v; error: %+v", *instanceKey, err) + _ = log.Fatalf("Could not regroup binlog server replicas of %+v; error: %+v", *instanceKey, err) } fmt.Println(promotedBinlogServer.Key.DisplayString()) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } } // move, GTID @@ -418,23 +413,23 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } _, err := inst.MoveBelowGTID(instanceKey, destinationKey) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) } case registerCliCommand("move-replicas-gtid", "GTID relocation", `Moves all replicas of a given instance under 
another (destination) instance using GTID`): { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } - movedReplicas, _, err, errs := inst.MoveReplicasGTID(instanceKey, destinationKey, pattern) + movedReplicas, _, errs, err := inst.MoveReplicasGTID(instanceKey, destinationKey, pattern) if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } else { for _, e := range errs { log.Errore(e) @@ -448,7 +443,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } validateInstanceIsFound(instanceKey) @@ -456,7 +451,7 @@ func Cli(command string, strict bool, instance string, destination string, owner lostReplicas = append(lostReplicas, cannotReplicateReplicas...) 
if promotedReplica == nil { - log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) + _ = log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) } fmt.Println(fmt.Sprintf("%s lost: %d, moved: %d", promotedReplica.Key.DisplayString(), len(lostReplicas), len(movedReplicas))) @@ -469,13 +464,13 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } _, _, err := inst.MatchBelow(instanceKey, destinationKey, true) if err != nil { log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), destinationKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), destinationKey.DisplayString()) } case registerCliCommand("match-up", "Pseudo-GTID relocation", `Transport the replica one level up the hierarchy, making it child of its grandparent, using Pseudo-GTID`): { @@ -484,7 +479,7 @@ func Cli(command string, strict bool, instance string, destination string, owner if err != nil { log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), instance.MasterKey.DisplayString()) } case registerCliCommand("rematch", "Pseudo-GTID relocation", `Reconnect a replica onto its master, via PSeudo-GTID.`): { @@ -493,20 +488,20 @@ func Cli(command string, strict bool, instance string, destination string, owner if err != nil { log.Fatale(err) } - fmt.Println(fmt.Sprintf("%s<%s", instanceKey.DisplayString(), instance.MasterKey.DisplayString())) + fmt.Printf("%s<%s\n", instanceKey.DisplayString(), instance.MasterKey.DisplayString()) } case registerCliCommand("match-replicas", "Pseudo-GTID relocation", `Matches all replicas of a given instance under another 
(destination) instance using Pseudo-GTID`): { // Move all replicas of "instance" beneath "destination" instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } if destinationKey == nil { - log.Fatal("Cannot deduce destination:", destination) + _ = log.Fatal("Cannot deduce destination:", destination) } - matchedReplicas, _, err, errs := inst.MultiMatchReplicas(instanceKey, destinationKey, pattern) + matchedReplicas, _, errs, err := inst.MultiMatchReplicas(instanceKey, destinationKey, pattern) if err != nil { log.Fatale(err) } else { @@ -522,10 +517,10 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } - matchedReplicas, _, err, errs := inst.MatchUpReplicas(instanceKey, pattern) + matchedReplicas, _, errs, err := inst.MatchUpReplicas(instanceKey, pattern) if err != nil { log.Fatale(err) } else { @@ -541,7 +536,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } validateInstanceIsFound(instanceKey) @@ -550,10 +545,10 @@ func Cli(command string, strict bool, instance string, destination string, owner lostReplicas = append(lostReplicas, cannotReplicateReplicas...) 
postponedFunctionsContainer.Wait() if promotedReplica == nil { - log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) + _ = log.Fatalf("Could not regroup replicas of %+v; error: %+v", *instanceKey, err) } - fmt.Println(fmt.Sprintf("%s lost: %d, trivial: %d, pseudo-gtid: %d", - promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas))) + fmt.Printf("%s lost: %d, trivial: %d, pseudo-gtid: %d\n", + promotedReplica.Key.DisplayString(), len(lostReplicas), len(equalReplicas), len(aheadReplicas)) if err != nil { log.Fatale(err) } @@ -586,7 +581,7 @@ func Cli(command string, strict bool, instance string, destination string, owner log.Fatale(err) } if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) + _ = log.Fatalf("Instance not found: %+v", *instanceKey) } fmt.Println(instance.GtidErrant) } @@ -648,7 +643,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } _, err := inst.DetachReplicaMasterHost(instanceKey) if err != nil { @@ -660,7 +655,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatal("Cannot deduce instance:", instance) + _ = log.Fatal("Cannot deduce instance:", instance) } _, err := inst.ReattachReplicaMasterHost(instanceKey) if err != nil { @@ -672,19 +667,19 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved instance") + _ = log.Fatalf("Unresolved instance") } instance, err := inst.ReadTopologyInstance(instanceKey) if err != nil { log.Fatale(err) } if 
instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) + _ = log.Fatalf("Instance not found: %+v", *instanceKey) } var binlogCoordinates *inst.BinlogCoordinates if binlogCoordinates, err = inst.ParseBinlogCoordinates(*config.RuntimeCLIFlags.BinlogFile); err != nil { - log.Fatalf("Expecing --binlog argument as file:pos") + _ = log.Fatalf("Expecting --binlog argument as file:pos") } _, err = inst.MasterPosWait(instanceKey, binlogCoordinates) if err != nil { @@ -732,7 +727,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved instance") + _ = log.Fatalf("Unresolved instance") } statements, err := inst.GetReplicationRestartPreserveStatements(instanceKey, *config.RuntimeCLIFlags.Statement) if err != nil { @@ -747,11 +742,11 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved instance") + _ = log.Fatalf("Unresolved instance") } instance := validateInstanceIsFound(instanceKey) if destinationKey == nil { - log.Fatal("Cannot deduce target instance:", destination) + _ = log.Fatal("Cannot deduce target instance:", destination) } otherInstance := validateInstanceIsFound(destinationKey) @@ -763,7 +758,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved instance") + _ = log.Fatalf("Unresolved instance") } instance := validateInstanceIsFound(instanceKey) if instance.ReplicaRunning() { @@ -774,7 +769,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved 
instance") + _ = log.Fatalf("Unresolved instance") } instance := validateInstanceIsFound(instanceKey) if instance.ReplicationThreadsStopped() { @@ -833,14 +828,14 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved instance") + _ = log.Fatalf("Unresolved instance") } instance, err := inst.ReadTopologyInstance(instanceKey) if err != nil { log.Fatale(err) } if instance == nil { - log.Fatalf("Instance not found: %+v", *instanceKey) + _ = log.Fatalf("Instance not found: %+v", *instanceKey) } coordinates, text, err := inst.FindLastPseudoGTIDEntry(instance, instance.RelaylogCoordinates, nil, strict, nil) if err != nil { @@ -852,7 +847,7 @@ func Cli(command string, strict bool, instance string, destination string, owner { instanceKey, _ = inst.FigureInstanceKey(instanceKey, thisInstanceKey) if instanceKey == nil { - log.Fatalf("Unresolved instance") + _ = log.Fatalf("Unresolved instance") } errantBinlogs, err := inst.LocateErrantGTID(instanceKey) if err != nil { @@ -1328,7 +1323,7 @@ func Cli(command string, strict bool, instance string, destination string, owner if err != nil { log.Fatale(err) } - inst.PutInstanceTag(instanceKey, tag) + _ = inst.PutInstanceTag(instanceKey, tag) fmt.Println(instanceKey.DisplayString()) } case registerCliCommand("untag", "tags", `Remove a tag from an instance`): @@ -1394,7 +1389,7 @@ func Cli(command string, strict bool, instance string, destination string, owner if reason == "" { log.Fatal("--reason option required") } - var durationSeconds int = 0 + var durationSeconds = 0 if duration != "" { durationSeconds, err = util.SimpleTimeToSeconds(duration) if err != nil { @@ -1440,7 +1435,7 @@ func Cli(command string, strict bool, instance string, destination string, owner if reason == "" { log.Fatal("--reason option required") } - var durationSeconds int = 0 + var durationSeconds = 0 if 
duration != "" { durationSeconds, err = util.SimpleTimeToSeconds(duration) if err != nil { @@ -1664,7 +1659,7 @@ func Cli(command string, strict bool, instance string, destination string, owner } if conn, err := net.Dial("tcp", rawInstanceKey.DisplayString()); err == nil { log.Debugf("tcp test is good; got connection %+v", conn) - conn.Close() + _ = conn.Close() } else { log.Fatale(err) } @@ -1825,14 +1820,14 @@ func Cli(command string, strict bool, instance string, destination string, owner if err != nil { log.Fatale(err) } - defer db.Close() - defer rows.Close() + defer func() { _ = db.Close() }() + defer func() { _ = rows.Close() }() fmt.Printf("%-12s %-30s %-6s %-15s %-6s\n", "HOSTGROUP", "HOSTNAME", "PORT", "STATUS", "WEIGHT") for rows.Next() { var hg, port, weight int var hostname, status string if err := rows.Scan(&hg, &hostname, &port, &status, &weight); err != nil { - log.Errorf("Error scanning row: %v", err) + _ = log.Errorf("Error scanning row: %v", err) continue } fmt.Printf("%-12d %-30s %-6d %-15s %-6d\n", hg, hostname, port, status, weight) diff --git a/go/app/http.go b/go/app/http.go index 65353759..d99393d2 100644 --- a/go/app/http.go +++ b/go/app/http.go @@ -48,7 +48,7 @@ var discoveryMetrics *collection.Collection func Http(continuousDiscovery bool) { promptForSSLPasswords() ometrics.InitPrometheus() - process.ContinuousRegistration(process.OrchestratorExecutionHttpMode, "") + process.ContinuousRegistration(string(process.OrchestratorExecutionHttpMode), "") if config.Config.ServeAgentsHttp { go agentsHttp() @@ -85,7 +85,7 @@ func standardHttp(continuousDiscovery bool) { { if config.Config.HTTPAuthUser == "" { // Still allowed; may be disallowed in future versions - log.Warning("AuthenticationMethod is configured as 'basic' but HTTPAuthUser undefined. Running without authentication.") + _ = log.Warning("AuthenticationMethod is configured as 'basic' but HTTPAuthUser undefined. 
Running without authentication.") } router.Use(http.BasicAuthMiddleware(config.Config.HTTPAuthUser, config.Config.HTTPAuthPassword)) } @@ -142,7 +142,7 @@ func standardHttp(continuousDiscovery bool) { if err != nil { log.Fatale(err) } - defer unixListener.Close() + defer func() { _ = unixListener.Close() }() if err := nethttp.Serve(unixListener, router); err != nil { log.Fatale(err) } diff --git a/go/attributes/attributes_dao.go b/go/attributes/attributes_dao.go index a614a8cb..e9604874 100644 --- a/go/attributes/attributes_dao.go +++ b/go/attributes/attributes_dao.go @@ -17,10 +17,7 @@ package attributes import ( - "errors" "fmt" - "io/ioutil" - "net/http" "strings" "github.com/proxysql/golib/log" @@ -28,24 +25,6 @@ import ( "github.com/proxysql/orchestrator/go/db" ) -func readResponse(res *http.Response, err error) ([]byte, error) { - if err != nil { - return nil, err - } - - defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - if res.Status == "500" { - return body, errors.New("Response Status 500") - } - - return body, nil -} - // SetHostAttributes func SetHostAttributes(hostname string, attributeName string, attributeValue string) error { _, err := db.ExecOrchestrator(` @@ -96,7 +75,7 @@ func getHostAttributesByClause(whereClause string, args []interface{}) ([]HostAt }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err } diff --git a/go/cmd/orchestrator/main.go b/go/cmd/orchestrator/main.go index 2e257e71..0347fee1 100644 --- a/go/cmd/orchestrator/main.go +++ b/go/cmd/orchestrator/main.go @@ -68,7 +68,7 @@ func main() { flag.Parse() if *destination != "" && *sibling != "" { - log.Fatalf("-s and -d are synonyms, yet both were specified. You're probably doing the wrong thing.") + _ = log.Fatalf("-s and -d are synonyms, yet both were specified. 
You're probably doing the wrong thing.") } switch *config.RuntimeCLIFlags.PromotionRule { case "prefer", "neutral", "prefer_not", "must_not": @@ -77,7 +77,7 @@ func main() { } default: { - log.Fatalf("-promotion-rule only supports prefer|neutral|prefer_not|must_not") + _ = log.Fatalf("-promotion-rule only supports prefer|neutral|prefer_not|must_not") } } if *destination == "" { @@ -125,11 +125,11 @@ func main() { log.SetLevel(log.ERROR) } if config.Config.EnableSyslog { - log.EnableSyslogWriter("orchestrator") + _ = log.EnableSyslogWriter("orchestrator") log.SetSyslogLevel(log.INFO) } if config.Config.AuditToSyslog { - inst.EnableAuditSyslog() + _ = inst.EnableAuditSyslog() } config.RuntimeCLIFlags.ConfiguredVersion = AppVersion config.MarkConfigurationLoaded() diff --git a/go/collection/collection.go b/go/collection/collection.go index b7c56ac1..76cb654c 100644 --- a/go/collection/collection.go +++ b/go/collection/collection.go @@ -180,7 +180,7 @@ func (c *Collection) StartAutoExpiration() { for { select { case <-ticker.C: // do the periodic expiry - c.removeBefore(time.Now().Add(-c.expirePeriod)) + _ = c.removeBefore(time.Now().Add(-c.expirePeriod)) case <-c.done: // stop the ticker and return ticker.Stop() return @@ -280,10 +280,6 @@ func (c *Collection) Append(m Metric) error { } c.Lock() defer c.Unlock() - // we don't want to add nil metrics - if c == nil { - return errors.New("Collection.Append: c == nil") - } c.collection = append(c.collection, m) return nil diff --git a/go/collection/collection_test.go b/go/collection/collection_test.go index d98af7ac..6eaba508 100644 --- a/go/collection/collection_test.go +++ b/go/collection/collection_test.go @@ -61,7 +61,7 @@ func TestCreateOrReturnCollection(t *testing.T) { // TestExpirePeriod checks that the set expire period is returned func TestExpirePeriod(t *testing.T) { oneSecond := time.Second - twoSeconds := 2 * oneSecond + twoDuration := 2 * oneSecond // create a new collection c := &Collection{} @@ -73,9 +73,9 
@@ func TestExpirePeriod(t *testing.T) { } // change the period and check again - c.SetExpirePeriod(twoSeconds) - if c.ExpirePeriod() != twoSeconds { - t.Errorf("TestExpirePeriod: did not get back twoSeconds") + c.SetExpirePeriod(twoDuration) + if c.ExpirePeriod() != twoDuration { + t.Errorf("TestExpirePeriod: did not get back twoDuration") } } @@ -96,7 +96,7 @@ func TestAppend(t *testing.T) { } for _, v := range []int{1, 2, 3} { tm := &testMetric{} - c.Append(tm) + _ = c.Append(tm) if len(c.Metrics()) != v { t.Errorf("TestExpirePeriod: len(Metrics) = %d, expecting %d", len(c.Metrics()), v) } diff --git a/go/config/config.go b/go/config/config.go index 339ccc0d..fca06eb0 100644 --- a/go/config/config.go +++ b/go/config/config.go @@ -64,26 +64,6 @@ const ( ConsulMaxTransactionOps = 64 ) -var deprecatedConfigurationVariables = []string{ - "DatabaselessMode__experimental", - "BufferBinlogEvents", - "BinlogFileHistoryDays", - "MaintenanceOwner", - "ReadLongRunningQueries", - "DiscoveryPollSeconds", - "ActiveNodeExpireSeconds", - "AuditPageSize", - "SlaveStartPostWaitMilliseconds", - "MySQLTopologyMaxPoolConnections", - "MaintenancePurgeDays", - "MaintenanceExpireMinutes", - "HttpTimeoutSeconds", - "AgentAutoDiscover", - "PseudoGTIDCoordinatesHistoryHeuristicMinutes", - "PseudoGTIDPreferIndependentMultiMatch", - "MaxOutdatedKeysToShow", -} - // Configuration makes for orchestrator configuration input, which can be provided by user via JSON formatted file. // Some of the parameteres have reasonable default values, and some (like database credentials) are // strictly expected from user. 
@@ -500,7 +480,7 @@ func (this *Configuration) postReadAdjustments() error { }{} err := gcfg.ReadFileInto(&mySQLConfig, this.MySQLOrchestratorCredentialsConfigFile) if err != nil { - log.Fatalf("Failed to parse gcfg data from file: %+v", err) + _ = log.Fatalf("Failed to parse gcfg data from file: %+v", err) } else { log.Debugf("Parsed orchestrator credentials from %s", this.MySQLOrchestratorCredentialsConfigFile) this.MySQLOrchestratorUser = mySQLConfig.Client.User @@ -524,7 +504,7 @@ func (this *Configuration) postReadAdjustments() error { }{} err := gcfg.ReadFileInto(&mySQLConfig, this.MySQLTopologyCredentialsConfigFile) if err != nil { - log.Fatalf("Failed to parse gcfg data from file: %+v", err) + _ = log.Fatalf("Failed to parse gcfg data from file: %+v", err) } else { log.Debugf("Parsed topology credentials from %s", this.MySQLTopologyCredentialsConfigFile) this.MySQLTopologyUser = mySQLConfig.Client.User @@ -573,7 +553,7 @@ func (this *Configuration) postReadAdjustments() error { } } if this.FailMasterPromotionIfSQLThreadNotUpToDate && this.DelayMasterPromotionIfSQLThreadNotUpToDate { - return fmt.Errorf("Cannot have both FailMasterPromotionIfSQLThreadNotUpToDate and DelayMasterPromotionIfSQLThreadNotUpToDate enabled") + return fmt.Errorf("cannot have both FailMasterPromotionIfSQLThreadNotUpToDate and DelayMasterPromotionIfSQLThreadNotUpToDate enabled") } if this.FailMasterPromotionOnLagMinutes > 0 && this.ReplicationLagQuery == "" { return fmt.Errorf("nonzero FailMasterPromotionOnLagMinutes requires ReplicationLagQuery to be set") @@ -598,9 +578,6 @@ func (this *Configuration) postReadAdjustments() error { if this.IsSQLite() && this.SQLite3DataFile == "" { return fmt.Errorf("SQLite3DataFile must be set when BackendDB is sqlite3") } - if this.IsSQLite() { - // this.HostnameResolveMethod = "none" - } if this.RaftEnabled && this.RaftDataDir == "" { return fmt.Errorf("RaftDataDir must be defined since raft is enabled (RaftEnabled)") } @@ -626,10 +603,10 @@ func 
(this *Configuration) postReadAdjustments() error { if this.HTTPAdvertise != "" { u, err := url.Parse(this.HTTPAdvertise) if err != nil { - return fmt.Errorf("Failed parsing HTTPAdvertise %s: %s", this.HTTPAdvertise, err.Error()) + return fmt.Errorf("failed parsing HTTPAdvertise %s: %s", this.HTTPAdvertise, err.Error()) } if u.Scheme == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include scheme (http:// or https://)") + return fmt.Errorf("if specified, HTTPAdvertise must include scheme (http:// or https://)") } if u.Hostname() == "" { return fmt.Errorf("If specified, HTTPAdvertise must include host name") @@ -686,10 +663,10 @@ func read(fileName string) (*Configuration, error) { if err == nil { log.Infof("Read config: %s", fileName) } else { - log.Fatal("Cannot read config file:", fileName, err) + _ = log.Fatal("Cannot read config file:", fileName, err) } if err := Config.postReadAdjustments(); err != nil { - log.Fatale(err) + _ = log.Fatale(err) } return Config, err } @@ -698,7 +675,7 @@ func read(fileName string) (*Configuration, error) { // A file can override configuration provided in previous file. 
func Read(fileNames ...string) *Configuration { for _, fileName := range fileNames { - read(fileName) + _, _ = read(fileName) } readFileNames = fileNames return Config @@ -708,7 +685,7 @@ func Read(fileNames ...string) *Configuration { func ForceRead(fileName string) *Configuration { _, err := read(fileName) if err != nil { - log.Fatal("Cannot read config file:", fileName, err) + _ = log.Fatal("Cannot read config file:", fileName, err) } readFileNames = []string{fileName} return Config @@ -717,10 +694,10 @@ func ForceRead(fileName string) *Configuration { // Reload re-reads configuration from last used files func Reload(extraFileNames ...string) *Configuration { for _, fileName := range readFileNames { - read(fileName) + _, _ = read(fileName) } for _, fileName := range extraFileNames { - read(fileName) + _, _ = read(fileName) } return Config } diff --git a/go/db/db.go b/go/db/db.go index f03b0d90..13175665 100644 --- a/go/db/db.go +++ b/go/db/db.go @@ -139,10 +139,6 @@ func IsSQLite() bool { return config.Config.IsSQLite() } -func isInMemorySQLite() bool { - return config.Config.IsSQLite() && strings.Contains(config.Config.SQLite3DataFile, ":memory:") -} - // matchDSN tries to match the DSN or returns an error func matchDSN(dsn string) (string, error) { re := regexp.MustCompile(dsnRegexp) @@ -216,7 +212,7 @@ func OpenOrchestrator() (db *sql.DB, err error) { } if err == nil && !fromCache { if !config.Config.SkipOrchestratorDatabaseUpdate { - initOrchestratorDB(db) + _ = initOrchestratorDB(db) } // A low value here will trigger reconnects which could // make the number of backend connections hit the tcp @@ -275,7 +271,7 @@ func registerOrchestratorDeployment(db *sql.DB) error { ) ` if _, err := execInternal(db, query, config.RuntimeCLIFlags.ConfiguredVersion); err != nil { - log.Fatalf("Unable to write to orchestrator_metadata: %+v", err) + _ = log.Fatalf("Unable to write to orchestrator_metadata: %+v", err) } log.Debugf("Migrated database schema to version [%+v]", 
config.RuntimeCLIFlags.ConfiguredVersion) return nil @@ -286,7 +282,7 @@ func registerOrchestratorDeployment(db *sql.DB) error { func deployStatements(db *sql.DB, queries []string) error { tx, err := db.Begin() if err != nil { - log.Fatale(err) + _ = log.Fatale(err) } // Ugly workaround ahead. // Origin of this workaround is the existence of some "timestamp NOT NULL," column definitions, @@ -298,19 +294,15 @@ func deployStatements(db *sql.DB, queries []string) error { // My bad. originalSqlMode := "" if config.Config.IsMySQL() { - err = tx.QueryRow(`select @@session.sql_mode`).Scan(&originalSqlMode) + _ = tx.QueryRow(`select @@session.sql_mode`).Scan(&originalSqlMode) if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', '')`); err != nil { - log.Fatale(err) + _ = log.Fatale(err) } if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_IN_DATE', '')`); err != nil { - log.Fatale(err) + _ = log.Fatale(err) } } - for i, query := range queries { - if i == 0 { - //log.Debugf("sql_mode is: %+v", originalSqlMode) - } - + for _, query := range queries { query, err := translateStatement(query) if err != nil { return log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) @@ -327,17 +319,17 @@ func deployStatements(db *sql.DB, queries []string) error { !strings.Contains(err.Error(), "check that column/key exists") && !strings.Contains(err.Error(), "already exists") && !strings.Contains(err.Error(), "Duplicate key name") { - log.Errorf("Error initiating orchestrator: %+v; query=%+v", err, query) + _ = log.Errorf("Error initiating orchestrator: %+v; query=%+v", err, query) } } } if config.Config.IsMySQL() { if _, err := tx.Exec(`set session sql_mode=?`, originalSqlMode); err != nil { - log.Fatale(err) + _ = log.Fatale(err) } } if err := tx.Commit(); err != nil { - log.Fatale(err) + _ = log.Fatale(err) } return nil } @@ -353,16 +345,16 @@ func initOrchestratorDB(db *sql.DB) error { return nil } if 
config.Config.PanicIfDifferentDatabaseDeploy && config.RuntimeCLIFlags.ConfiguredVersion != "" && !versionAlreadyDeployed { - log.Fatalf("PanicIfDifferentDatabaseDeploy is set. Configured version %s is not the version found in the database", config.RuntimeCLIFlags.ConfiguredVersion) + _ = log.Fatalf("PanicIfDifferentDatabaseDeploy is set. Configured version %s is not the version found in the database", config.RuntimeCLIFlags.ConfiguredVersion) } log.Debugf("Migrating database schema") - deployStatements(db, generateSQLBase) - deployStatements(db, generateSQLPatches) - registerOrchestratorDeployment(db) + _ = deployStatements(db, generateSQLBase) + _ = deployStatements(db, generateSQLPatches) + _ = registerOrchestratorDeployment(db) if IsSQLite() { - ExecOrchestrator(`PRAGMA journal_mode = WAL`) - ExecOrchestrator(`PRAGMA synchronous = NORMAL`) + _, _ = ExecOrchestrator(`PRAGMA journal_mode = WAL`) + _, _ = ExecOrchestrator(`PRAGMA synchronous = NORMAL`) } return nil diff --git a/go/db/tls.go b/go/db/tls.go index 07f1b764..f1d49f52 100644 --- a/go/db/tls.go +++ b/go/db/tls.go @@ -50,10 +50,10 @@ var readInstanceTLSCacheCounter = metrics.NewCounter() var writeInstanceTLSCacheCounter = metrics.NewCounter() func init() { - metrics.Register("instance_tls.read", readInstanceTLSCounter) - metrics.Register("instance_tls.write", writeInstanceTLSCounter) - metrics.Register("instance_tls.read_cache", readInstanceTLSCacheCounter) - metrics.Register("instance_tls.write_cache", writeInstanceTLSCacheCounter) + _ = metrics.Register("instance_tls.read", readInstanceTLSCounter) + _ = metrics.Register("instance_tls.write", writeInstanceTLSCounter) + _ = metrics.Register("instance_tls.read_cache", readInstanceTLSCacheCounter) + _ = metrics.Register("instance_tls.write_cache", writeInstanceTLSCacheCounter) } type SqlUtilsLogger struct { @@ -63,7 +63,7 @@ type SqlUtilsLogger struct { func (logger SqlUtilsLogger) OnError(caller_context string, query string, err error) error { query = 
strings.Join(strings.Fields(query), " ") // trim whitespaces - query = strings.Replace(query, "%", "%%", -1) // escape % + query = strings.ReplaceAll(query, "%", "%%") // escape % msg := fmt.Sprintf("%+v(%+v) %+v: %+v", caller_context, @@ -94,7 +94,7 @@ func (logger SqlUtilsLogger) ValidateQuery(query string) { lquery := strings.ToLower(query) if strings.Contains(lquery, "master") || strings.Contains(lquery, "slave") { - log.Error("QUERY CONTAINS MASTER / SLAVE: ") + _ = log.Error("QUERY CONTAINS MASTER / SLAVE: ") // panic("Query contains master/slave: " + query) } } @@ -125,7 +125,7 @@ func requiresTLS(host string, port int, mysql_uri string) bool { required=values(required) ` if _, err := ExecOrchestrator(query, host, port, required); err != nil { - log.Errore(err) + _ = log.Errore(err) } writeInstanceTLSCounter.Inc(1) diff --git a/go/discovery/queue.go b/go/discovery/queue.go index 375c8bd1..ddcfefe9 100644 --- a/go/discovery/queue.go +++ b/go/discovery/queue.go @@ -176,7 +176,7 @@ func (q *Queue) Consume() inst.InstanceKey { // alarm if have been waiting for too long timeOnQueue := time.Since(q.queuedKeys[key]) if timeOnQueue > time.Duration(config.Config.InstancePollSeconds)*time.Second { - log.Warningf("key %v spent %.4fs waiting on a discoveryQueue", key, timeOnQueue.Seconds()) + _ = log.Warningf("key %v spent %.4fs waiting on a discoveryQueue", key, timeOnQueue.Seconds()) } q.consumedKeys[key] = q.queuedKeys[key] diff --git a/go/http/agents_api.go b/go/http/agents_api.go index fdd49a0b..bef4fe60 100644 --- a/go/http/agents_api.go +++ b/go/http/agents_api.go @@ -90,7 +90,7 @@ func (this *HttpAgentsAPI) AgentsHosts(w http.ResponseWriter, r *http.Request) { if r.URL.Query().Get("format") == "txt" { w.Header().Set("Content-Type", "text/plain") - w.Write([]byte(strings.Join(hostnames, "\n"))) + _, _ = w.Write([]byte(strings.Join(hostnames, "\n"))) } else { renderJSON(w, 200, hostnames) } @@ -111,7 +111,7 @@ func (this *HttpAgentsAPI) AgentsInstances(w 
http.ResponseWriter, r *http.Reques if r.URL.Query().Get("format") == "txt" { w.Header().Set("Content-Type", "text/plain") - w.Write([]byte(strings.Join(hostnames, "\n"))) + _, _ = w.Write([]byte(strings.Join(hostnames, "\n"))) } else { renderJSON(w, 200, hostnames) } diff --git a/go/http/api.go b/go/http/api.go index ef92da21..2096b89d 100644 --- a/go/http/api.go +++ b/go/http/api.go @@ -129,7 +129,7 @@ func setupMessagePrefix() { return } if act != "FQDN" && act != "hostname" && act != "custom" { - log.Warning("PrependMessagesWithOrcIdentity option has unsupported value '%+v'") + _ = log.Warningf("PrependMessagesWithOrcIdentity option has unsupported value '%+v'", act) return } @@ -139,7 +139,7 @@ func setupMessagePrefix() { if act == "FQDN" { if hostname, err = fqdn.FqdnHostname(); err != nil { - log.Warning("Failed to get Orchestrator's FQDN. Falling back to hostname.") + _ = log.Warning("Failed to get Orchestrator's FQDN. Falling back to hostname.") hostname = "" fallbackActive = true } @@ -147,7 +147,7 @@ func setupMessagePrefix() { if fallbackActive || act == "hostname" { fallbackActive = false if hostname, err = os.Hostname(); err != nil { - log.Warning("Failed to get Orchestrator's FQDN. Falling back to custom prefix (if provided).") + _ = log.Warning("Failed to get Orchestrator's FQDN. Falling back to custom prefix (if provided).") hostname = "" fallbackActive = true } @@ -158,7 +158,7 @@ func setupMessagePrefix() { if hostname != "" { messagePrefix = fmt.Sprintf("Orchestrator %+v says: ", hostname) } else { - log.Warning("Prepending messages with Orchestrator identity was requested, but identity cannot be determined. Skipping prefix.") + _ = log.Warning("Prepending messages with Orchestrator identity was requested, but identity cannot be determined. 
Skipping prefix.") } } @@ -287,7 +287,7 @@ func (this *HttpAPI) Discover(w http.ResponseWriter, r *http.Request) { } if orcraft.IsRaftEnabled() { - orcraft.PublishCommand("discover", instanceKey) + _, _ = orcraft.PublishCommand("discover", instanceKey) } else { logic.DiscoverInstance(instanceKey) } @@ -358,9 +358,9 @@ func (this *HttpAPI) ForgetCluster(w http.ResponseWriter, r *http.Request) { } if orcraft.IsRaftEnabled() { - orcraft.PublishCommand("forget-cluster", clusterName) + _, _ = orcraft.PublishCommand("forget-cluster", clusterName) } else { - inst.ForgetCluster(clusterName) + _ = inst.ForgetCluster(clusterName) } Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Cluster forgotten: %+v", clusterName)}) } @@ -375,7 +375,7 @@ func (this *HttpAPI) Resolve(w http.ResponseWriter, r *http.Request) { } if conn, err := net.Dial("tcp", instanceKey.DisplayString()); err == nil { - conn.Close() + _ = conn.Close() } else { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -490,7 +490,7 @@ func (this *HttpAPI) BeginDowntime(w http.ResponseWriter, r *http.Request) { return } - var durationSeconds int = 0 + var durationSeconds = 0 if chi.URLParam(r, "duration") != "" { durationSeconds, err = util.SimpleTimeToSeconds(chi.URLParam(r, "duration")) if durationSeconds < 0 { @@ -575,7 +575,7 @@ func (this *HttpAPI) MoveUpReplicas(w http.ResponseWriter, r *http.Request) { return } - replicas, newMaster, err, errs := inst.MoveUpReplicas(&instanceKey, r.URL.Query().Get("pattern")) + replicas, newMaster, errs, err := inst.MoveUpReplicas(&instanceKey, r.URL.Query().Get("pattern")) if err != nil { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -623,7 +623,7 @@ func (this *HttpAPI) RepointReplicas(w http.ResponseWriter, r *http.Request) { return } - replicas, err, _ := inst.RepointReplicas(&instanceKey, r.URL.Query().Get("pattern")) + replicas, _, err := inst.RepointReplicas(&instanceKey, r.URL.Query().Get("pattern")) if err != nil { 
Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -773,7 +773,7 @@ func (this *HttpAPI) LocateErrantGTID(w http.ResponseWriter, r *http.Request) { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("located errant GTID"), Details: errantBinlogs}) + Respond(w, &APIResponse{Code: OK, Message: "located errant GTID", Details: errantBinlogs}) } // ErrantGTIDResetMaster removes errant transactions on a server by way of RESET MASTER @@ -887,7 +887,7 @@ func (this *HttpAPI) MoveReplicasGTID(w http.ResponseWriter, r *http.Request) { return } - movedReplicas, _, err, errs := inst.MoveReplicasGTID(&instanceKey, &belowKey, r.URL.Query().Get("pattern")) + movedReplicas, _, errs, err := inst.MoveReplicasGTID(&instanceKey, &belowKey, r.URL.Query().Get("pattern")) if err != nil { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -982,7 +982,7 @@ func (this *HttpAPI) RelocateReplicas(w http.ResponseWriter, r *http.Request) { return } - replicas, _, err, errs := inst.RelocateReplicas(&instanceKey, &belowKey, r.URL.Query().Get("pattern")) + replicas, _, errs, err := inst.RelocateReplicas(&instanceKey, &belowKey, r.URL.Query().Get("pattern")) if err != nil { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1116,7 +1116,7 @@ func (this *HttpAPI) MultiMatchReplicas(w http.ResponseWriter, r *http.Request) return } - replicas, newMaster, err, errs := inst.MultiMatchReplicas(&instanceKey, &belowKey, r.URL.Query().Get("pattern")) + replicas, newMaster, errs, err := inst.MultiMatchReplicas(&instanceKey, &belowKey, r.URL.Query().Get("pattern")) if err != nil { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -1137,7 +1137,7 @@ func (this *HttpAPI) MatchUpReplicas(w http.ResponseWriter, r *http.Request) { return } - replicas, newMaster, err, errs := inst.MatchUpReplicas(&instanceKey, r.URL.Query().Get("pattern")) + replicas, newMaster, 
errs, err := inst.MatchUpReplicas(&instanceKey, r.URL.Query().Get("pattern")) if err != nil { Respond(w, &APIResponse{Code: ERROR, Message: err.Error()}) return @@ -2903,7 +2903,7 @@ func (this *HttpAPI) Health(w http.ResponseWriter, r *http.Request) { return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Application node is healthy"), Details: health}) + Respond(w, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health}) } @@ -2937,7 +2937,7 @@ func (this *HttpAPI) StatusCheck(w http.ResponseWriter, r *http.Request) { renderJSON(w, 500, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Application node is unhealthy %+v", err), Details: health}) return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Application node is healthy"), Details: health}) + Respond(w, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health}) } // GrabElection forcibly grabs leadership. Use with care!! @@ -2952,7 +2952,7 @@ func (this *HttpAPI) GrabElection(w http.ResponseWriter, r *http.Request) { return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Node elected as leader")}) + Respond(w, &APIResponse{Code: OK, Message: "Node elected as leader"}) } // Reelect causes re-elections for an active node @@ -2967,7 +2967,7 @@ func (this *HttpAPI) Reelect(w http.ResponseWriter, r *http.Request) { return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Set re-elections")}) + Respond(w, &APIResponse{Code: OK, Message: "Set re-elections"}) } // RaftAddPeer adds a new node to the raft cluster @@ -3020,8 +3020,8 @@ func (this *HttpAPI) RaftYield(w http.ResponseWriter, r *http.Request) { Respond(w, &APIResponse{Code: ERROR, Message: "raft-yield: not running with raft setup"}) return } - orcraft.PublishYield(chi.URLParam(r, "node")) - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Asynchronously yielded")}) + _, _ = orcraft.PublishYield(chi.URLParam(r, "node")) + Respond(w, &APIResponse{Code: OK, 
Message: "Asynchronously yielded"}) } // RaftYieldHint yields to a host whose name contains given hint (e.g. DC) @@ -3035,7 +3035,7 @@ func (this *HttpAPI) RaftYieldHint(w http.ResponseWriter, r *http.Request) { return } hint := chi.URLParam(r, "hint") - orcraft.PublishYieldHostnameHint(hint) + _, _ = orcraft.PublishYieldHostnameHint(hint) Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Asynchronously yielded by hint %s", hint), Details: hint}) } @@ -3160,9 +3160,9 @@ func (this *HttpAPI) ReloadConfiguration(w http.ResponseWriter, r *http.Request) } extraConfigFile := r.URL.Query().Get("config") config.Reload(extraConfigFile) - inst.AuditOperation("reload-configuration", nil, "Triggered via API") + _ = inst.AuditOperation("reload-configuration", nil, "Triggered via API") - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Config reloaded"), Details: extraConfigFile}) + Respond(w, &APIResponse{Code: OK, Message: "Config reloaded", Details: extraConfigFile}) } // ReplicationAnalysis retuens list of issues @@ -3183,7 +3183,7 @@ func (this *HttpAPI) replicationAnalysis(clusterName string, instanceKey *inst.I analysis = filtered } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Analysis"), Details: analysis}) + Respond(w, &APIResponse{Code: OK, Message: "Analysis", Details: analysis}) } // ReplicationAnalysis retuens list of issues @@ -3193,9 +3193,8 @@ func (this *HttpAPI) ReplicationAnalysis(w http.ResponseWriter, r *http.Request) // ReplicationAnalysis retuens list of issues func (this *HttpAPI) ReplicationAnalysisForCluster(w http.ResponseWriter, r *http.Request) { - clusterName := chi.URLParam(r, "clusterName") - var err error + clusterName, err := "", error(nil) if clusterName, err = inst.DeduceClusterName(chi.URLParam(r, "clusterName")); err != nil { Respond(w, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Cannot get analysis: %+v", err)}) return @@ -3398,7 +3397,7 @@ func (this *HttpAPI) AutomatedRecoveryFilters(w http.ResponseWriter, r *http.Req 
automatedRecoveryMap["RecoverIntermediateMasterClusterFilters"] = config.Config.RecoverIntermediateMasterClusterFilters automatedRecoveryMap["RecoveryIgnoreHostnameFilters"] = config.Config.RecoveryIgnoreHostnameFilters - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Automated recovery configuration details"), Details: automatedRecoveryMap}) + Respond(w, &APIResponse{Code: OK, Message: "Automated recovery configuration details", Details: automatedRecoveryMap}) } // AuditFailureDetection provides list of topology_failure_detection entries @@ -3541,7 +3540,7 @@ func (this *HttpAPI) AcknowledgeClusterRecoveries(w http.ResponseWriter, r *http comment := strings.TrimSpace(r.URL.Query().Get("comment")) if comment == "" { - Respond(w, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")}) + Respond(w, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"}) return } userId := getUserId(r) @@ -3560,7 +3559,7 @@ func (this *HttpAPI) AcknowledgeClusterRecoveries(w http.ResponseWriter, r *http return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged cluster recoveries"), Details: clusterName}) + Respond(w, &APIResponse{Code: OK, Message: "Acknowledged cluster recoveries", Details: clusterName}) } // ClusterInfo provides details of a given cluster @@ -3578,7 +3577,7 @@ func (this *HttpAPI) AcknowledgeInstanceRecoveries(w http.ResponseWriter, r *htt comment := strings.TrimSpace(r.URL.Query().Get("comment")) if comment == "" { - Respond(w, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")}) + Respond(w, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"}) return } userId := getUserId(r) @@ -3597,7 +3596,7 @@ func (this *HttpAPI) AcknowledgeInstanceRecoveries(w http.ResponseWriter, r *htt return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged instance recoveries"), Details: instanceKey}) + Respond(w, &APIResponse{Code: OK, Message: 
"Acknowledged instance recoveries", Details: instanceKey}) } // ClusterInfo provides details of a given cluster @@ -3624,7 +3623,7 @@ func (this *HttpAPI) AcknowledgeRecovery(w http.ResponseWriter, r *http.Request) } comment := strings.TrimSpace(r.URL.Query().Get("comment")) if comment == "" { - Respond(w, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")}) + Respond(w, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"}) return } userId := getUserId(r) @@ -3649,7 +3648,7 @@ func (this *HttpAPI) AcknowledgeRecovery(w http.ResponseWriter, r *http.Request) return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged recovery"), Details: idParam}) + Respond(w, &APIResponse{Code: OK, Message: "Acknowledged recovery", Details: idParam}) } // ClusterInfo provides details of a given cluster @@ -3661,7 +3660,7 @@ func (this *HttpAPI) AcknowledgeAllRecoveries(w http.ResponseWriter, r *http.Req comment := strings.TrimSpace(r.URL.Query().Get("comment")) if comment == "" { - Respond(w, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")}) + Respond(w, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"}) return } userId := getUserId(r) @@ -3681,7 +3680,7 @@ func (this *HttpAPI) AcknowledgeAllRecoveries(w http.ResponseWriter, r *http.Req return } - Respond(w, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged all recoveries"), Details: comment}) + Respond(w, &APIResponse{Code: OK, Message: "Acknowledged all recoveries", Details: comment}) } // BlockedRecoveries reads list of currently blocked recoveries, optionally filtered by cluster name diff --git a/go/http/health.go b/go/http/health.go index 300257e6..9135940d 100644 --- a/go/http/health.go +++ b/go/http/health.go @@ -30,7 +30,7 @@ import ( func HealthLive(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - 
json.NewEncoder(w).Encode(map[string]string{"status": "alive"}) + _ = json.NewEncoder(w).Encode(map[string]string{"status": "alive"}) } // HealthReady checks whether the backend DB is connected and the health check @@ -42,10 +42,10 @@ func HealthReady(w http.ResponseWriter, r *http.Request) { if healthy { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(map[string]string{"status": "ready"}) + _ = json.NewEncoder(w).Encode(map[string]string{"status": "ready"}) } else { w.WriteHeader(http.StatusServiceUnavailable) - json.NewEncoder(w).Encode(map[string]string{"status": "not ready"}) + _ = json.NewEncoder(w).Encode(map[string]string{"status": "not ready"}) } } @@ -65,9 +65,9 @@ func HealthLeader(w http.ResponseWriter, r *http.Request) { if isLeader { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(map[string]string{"status": "leader"}) + _ = json.NewEncoder(w).Encode(map[string]string{"status": "leader"}) } else { w.WriteHeader(http.StatusServiceUnavailable) - json.NewEncoder(w).Encode(map[string]string{"status": "not leader"}) + _ = json.NewEncoder(w).Encode(map[string]string{"status": "not leader"}) } } diff --git a/go/http/httpbase.go b/go/http/httpbase.go index 67df2118..1683f1da 100644 --- a/go/http/httpbase.go +++ b/go/http/httpbase.go @@ -160,19 +160,6 @@ func getUserId(req *http.Request) string { } } -func getClusterHint(params map[string]string) string { - if params["clusterHint"] != "" { - return params["clusterHint"] - } - if params["clusterName"] != "" { - return params["clusterName"] - } - if params["host"] != "" && params["port"] != "" { - return fmt.Sprintf("%s:%s", params["host"], params["port"]) - } - return "" -} - // getClusterHintFromRequest extracts the cluster hint from chi URL params. 
func getClusterHintFromRequest(r *http.Request) string { if v := chi.URLParam(r, "clusterHint"); v != "" { @@ -208,16 +195,6 @@ func figureClusterName(hint string) (clusterName string, err error) { return inst.FigureClusterName(hint, instanceKey, nil) } -// getClusterNameIfExists returns a cluster name by params hint, or an empty cluster name -// if no hint is given -func getClusterNameIfExists(params map[string]string) (clusterName string, err error) { - if clusterHint := getClusterHint(params); clusterHint == "" { - return "", nil - } else { - return figureClusterName(clusterHint) - } -} - // BasicAuthMiddleware returns a middleware that performs HTTP Basic Authentication. func BasicAuthMiddleware(username, password string) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { diff --git a/go/http/raft_reverse_proxy.go b/go/http/raft_reverse_proxy.go index 1f9d4bc4..e53f6d59 100644 --- a/go/http/raft_reverse_proxy.go +++ b/go/http/raft_reverse_proxy.go @@ -32,7 +32,7 @@ func raftReverseProxyMiddleware(next http.Handler) http.Handler { } u, err := url.Parse(orcraft.LeaderURI.Get()) if err != nil { - log.Errore(err) + _ = log.Errore(err) next.ServeHTTP(w, r) return } @@ -40,7 +40,7 @@ func raftReverseProxyMiddleware(next http.Handler) http.Handler { proxy := httputil.NewSingleHostReverseProxy(u) proxy.Transport, err = orcraft.GetRaftHttpTransport() if err != nil { - log.Errore(err) + _ = log.Errore(err) next.ServeHTTP(w, r) return } diff --git a/go/http/render.go b/go/http/render.go index 3ce22b93..9073be1c 100644 --- a/go/http/render.go +++ b/go/http/render.go @@ -31,7 +31,7 @@ func renderJSON(w http.ResponseWriter, status int, data interface{}) { w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.WriteHeader(status) if err := json.NewEncoder(w).Encode(data); err != nil { - log.Errore(err) + _ = log.Errore(err) } } @@ -71,14 +71,14 @@ func getTemplate(name string) (*template.Template, error) { func renderHTML(w 
http.ResponseWriter, status int, name string, data interface{}) { t, err := getTemplate(name) if err != nil { - log.Errorf("Error parsing template %s: %+v", name, err) + _ = log.Errorf("Error parsing template %s: %+v", name, err) http.Error(w, "Internal Server Error", http.StatusInternalServerError) return } w.Header().Set("Content-Type", "text/html; charset=UTF-8") w.WriteHeader(status) if err := t.Execute(w, data); err != nil { - log.Errorf("Error executing template %s: %+v", name, err) + _ = log.Errorf("Error executing template %s: %+v", name, err) } } diff --git a/go/http/web.go b/go/http/web.go index 21249c41..b05998d3 100644 --- a/go/http/web.go +++ b/go/http/web.go @@ -459,16 +459,16 @@ func (this *HttpWeb) RegisterDebug(router chi.Router) { router.Get(this.URLPrefix+"/debug/vars", func(w http.ResponseWriter, r *http.Request) { // from expvar.go, since the expvarHandler isn't exported :( w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") + _, _ = fmt.Fprintf(w, "{\n") first := true expvar.Do(func(kv expvar.KeyValue) { if !first { - fmt.Fprintf(w, ",\n") + _, _ = fmt.Fprintf(w, ",\n") } first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + _, _ = fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) }) - fmt.Fprintf(w, "\n}\n") + _, _ = fmt.Fprintf(w, "\n}\n") }) // list all the /debug/ endpoints we want diff --git a/go/inst/analysis.go b/go/inst/analysis.go index 1f56c263..f1aeff5a 100644 --- a/go/inst/analysis.go +++ b/go/inst/analysis.go @@ -28,50 +28,50 @@ type AnalysisCode string const ( NoProblem AnalysisCode = "NoProblem" - DeadMasterWithoutReplicas = "DeadMasterWithoutReplicas" - DeadMaster = "DeadMaster" - DeadMasterAndReplicas = "DeadMasterAndReplicas" - DeadMasterAndSomeReplicas = "DeadMasterAndSomeReplicas" - UnreachableMasterWithLaggingReplicas = "UnreachableMasterWithLaggingReplicas" - UnreachableMaster = "UnreachableMaster" - MasterSingleReplicaNotReplicating = "MasterSingleReplicaNotReplicating" - 
MasterSingleReplicaDead = "MasterSingleReplicaDead" - AllMasterReplicasNotReplicating = "AllMasterReplicasNotReplicating" - AllMasterReplicasNotReplicatingOrDead = "AllMasterReplicasNotReplicatingOrDead" - LockedSemiSyncMasterHypothesis = "LockedSemiSyncMasterHypothesis" - LockedSemiSyncMaster = "LockedSemiSyncMaster" - MasterWithTooManySemiSyncReplicas = "MasterWithTooManySemiSyncReplicas" - MasterWithoutReplicas = "MasterWithoutReplicas" - DeadCoMaster = "DeadCoMaster" - DeadCoMasterAndSomeReplicas = "DeadCoMasterAndSomeReplicas" - UnreachableCoMaster = "UnreachableCoMaster" - AllCoMasterReplicasNotReplicating = "AllCoMasterReplicasNotReplicating" - DeadIntermediateMaster = "DeadIntermediateMaster" - DeadIntermediateMasterWithSingleReplica = "DeadIntermediateMasterWithSingleReplica" - DeadIntermediateMasterWithSingleReplicaFailingToConnect = "DeadIntermediateMasterWithSingleReplicaFailingToConnect" - DeadIntermediateMasterAndSomeReplicas = "DeadIntermediateMasterAndSomeReplicas" - DeadIntermediateMasterAndReplicas = "DeadIntermediateMasterAndReplicas" - UnreachableIntermediateMasterWithLaggingReplicas = "UnreachableIntermediateMasterWithLaggingReplicas" - UnreachableIntermediateMaster = "UnreachableIntermediateMaster" - AllIntermediateMasterReplicasFailingToConnectOrDead = "AllIntermediateMasterReplicasFailingToConnectOrDead" - AllIntermediateMasterReplicasNotReplicating = "AllIntermediateMasterReplicasNotReplicating" - FirstTierReplicaFailingToConnectToMaster = "FirstTierReplicaFailingToConnectToMaster" - BinlogServerFailingToConnectToMaster = "BinlogServerFailingToConnectToMaster" + DeadMasterWithoutReplicas AnalysisCode = "DeadMasterWithoutReplicas" + DeadMaster AnalysisCode = "DeadMaster" + DeadMasterAndReplicas AnalysisCode = "DeadMasterAndReplicas" + DeadMasterAndSomeReplicas AnalysisCode = "DeadMasterAndSomeReplicas" + UnreachableMasterWithLaggingReplicas AnalysisCode = "UnreachableMasterWithLaggingReplicas" + UnreachableMaster AnalysisCode = 
"UnreachableMaster" + MasterSingleReplicaNotReplicating AnalysisCode = "MasterSingleReplicaNotReplicating" + MasterSingleReplicaDead AnalysisCode = "MasterSingleReplicaDead" + AllMasterReplicasNotReplicating AnalysisCode = "AllMasterReplicasNotReplicating" + AllMasterReplicasNotReplicatingOrDead AnalysisCode = "AllMasterReplicasNotReplicatingOrDead" + LockedSemiSyncMasterHypothesis AnalysisCode = "LockedSemiSyncMasterHypothesis" + LockedSemiSyncMaster AnalysisCode = "LockedSemiSyncMaster" + MasterWithTooManySemiSyncReplicas AnalysisCode = "MasterWithTooManySemiSyncReplicas" + MasterWithoutReplicas AnalysisCode = "MasterWithoutReplicas" + DeadCoMaster AnalysisCode = "DeadCoMaster" + DeadCoMasterAndSomeReplicas AnalysisCode = "DeadCoMasterAndSomeReplicas" + UnreachableCoMaster AnalysisCode = "UnreachableCoMaster" + AllCoMasterReplicasNotReplicating AnalysisCode = "AllCoMasterReplicasNotReplicating" + DeadIntermediateMaster AnalysisCode = "DeadIntermediateMaster" + DeadIntermediateMasterWithSingleReplica AnalysisCode = "DeadIntermediateMasterWithSingleReplica" + DeadIntermediateMasterWithSingleReplicaFailingToConnect AnalysisCode = "DeadIntermediateMasterWithSingleReplicaFailingToConnect" + DeadIntermediateMasterAndSomeReplicas AnalysisCode = "DeadIntermediateMasterAndSomeReplicas" + DeadIntermediateMasterAndReplicas AnalysisCode = "DeadIntermediateMasterAndReplicas" + UnreachableIntermediateMasterWithLaggingReplicas AnalysisCode = "UnreachableIntermediateMasterWithLaggingReplicas" + UnreachableIntermediateMaster AnalysisCode = "UnreachableIntermediateMaster" + AllIntermediateMasterReplicasFailingToConnectOrDead AnalysisCode = "AllIntermediateMasterReplicasFailingToConnectOrDead" + AllIntermediateMasterReplicasNotReplicating AnalysisCode = "AllIntermediateMasterReplicasNotReplicating" + FirstTierReplicaFailingToConnectToMaster AnalysisCode = "FirstTierReplicaFailingToConnectToMaster" + BinlogServerFailingToConnectToMaster AnalysisCode = 
"BinlogServerFailingToConnectToMaster" // Group replication problems DeadReplicationGroupMemberWithReplicas = "DeadReplicationGroupMemberWithReplicas" ) const ( StatementAndMixedLoggingReplicasStructureWarning AnalysisCode = "StatementAndMixedLoggingReplicasStructureWarning" - StatementAndRowLoggingReplicasStructureWarning = "StatementAndRowLoggingReplicasStructureWarning" - MixedAndRowLoggingReplicasStructureWarning = "MixedAndRowLoggingReplicasStructureWarning" - MultipleMajorVersionsLoggingReplicasStructureWarning = "MultipleMajorVersionsLoggingReplicasStructureWarning" - NoLoggingReplicasStructureWarning = "NoLoggingReplicasStructureWarning" - DifferentGTIDModesStructureWarning = "DifferentGTIDModesStructureWarning" - ErrantGTIDStructureWarning = "ErrantGTIDStructureWarning" - NoFailoverSupportStructureWarning = "NoFailoverSupportStructureWarning" - NoWriteableMasterStructureWarning = "NoWriteableMasterStructureWarning" - NotEnoughValidSemiSyncReplicasStructureWarning = "NotEnoughValidSemiSyncReplicasStructureWarning" + StatementAndRowLoggingReplicasStructureWarning AnalysisCode = "StatementAndRowLoggingReplicasStructureWarning" + MixedAndRowLoggingReplicasStructureWarning AnalysisCode = "MixedAndRowLoggingReplicasStructureWarning" + MultipleMajorVersionsLoggingReplicasStructureWarning AnalysisCode = "MultipleMajorVersionsLoggingReplicasStructureWarning" + NoLoggingReplicasStructureWarning AnalysisCode = "NoLoggingReplicasStructureWarning" + DifferentGTIDModesStructureWarning AnalysisCode = "DifferentGTIDModesStructureWarning" + ErrantGTIDStructureWarning AnalysisCode = "ErrantGTIDStructureWarning" + NoFailoverSupportStructureWarning AnalysisCode = "NoFailoverSupportStructureWarning" + NoWriteableMasterStructureWarning AnalysisCode = "NoWriteableMasterStructureWarning" + NotEnoughValidSemiSyncReplicasStructureWarning AnalysisCode = "NotEnoughValidSemiSyncReplicasStructureWarning" ) type InstanceAnalysis struct { diff --git a/go/inst/analysis_dao.go 
b/go/inst/analysis_dao.go index 15619346..a2ae4de3 100644 --- a/go/inst/analysis_dao.go +++ b/go/inst/analysis_dao.go @@ -24,7 +24,7 @@ import ( "github.com/proxysql/orchestrator/go/config" "github.com/proxysql/orchestrator/go/db" "github.com/proxysql/orchestrator/go/process" - orcraft "github.com/proxysql/orchestrator/go/raft" + "github.com/proxysql/orchestrator/go/util" "github.com/patrickmn/go-cache" @@ -39,8 +39,8 @@ var analysisChangeWriteCounter = metrics.NewCounter() var recentInstantAnalysis *cache.Cache func init() { - metrics.Register("analysis.change.write.attempt", analysisChangeWriteAttemptCounter) - metrics.Register("analysis.change.write", analysisChangeWriteCounter) + _ = metrics.Register("analysis.change.write.attempt", analysisChangeWriteAttemptCounter) + _ = metrics.Register("analysis.change.write", analysisChangeWriteCounter) go initializeAnalysisDaoPostConfiguration() } @@ -464,7 +464,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) a.ClusterDetails.ReadRecoveryInfo() a.Replicas = *NewInstanceKeyMap() - a.Replicas.ReadCommaDelimitedList(m.GetString("slave_hosts")) + _ = a.Replicas.ReadCommaDelimitedList(m.GetString("slave_hosts")) countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 @@ -725,7 +725,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) if a.CountReplicas > 0 && hints.AuditAnalysis { // Interesting enough for analysis - go auditInstanceAnalysisInChangelog(&a.AnalyzedInstanceKey, a.Analysis) + go func() { _ = auditInstanceAnalysisInChangelog(&a.AnalyzedInstanceKey, a.Analysis) }() } return nil }) @@ -737,36 +737,6 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) return result, log.Errore(err) } -func getConcensusReplicationAnalysis(analysisEntries []ReplicationAnalysis) ([]ReplicationAnalysis, 
error) { - if !orcraft.IsRaftEnabled() { - return analysisEntries, nil - } - if !config.Config.ExpectFailureAnalysisConcensus { - return analysisEntries, nil - } - concensusAnalysisEntries := []ReplicationAnalysis{} - peerAnalysisMap, err := ReadPeerAnalysisMap() - if err != nil { - return analysisEntries, err - } - quorumSize, err := orcraft.QuorumSize() - if err != nil { - return analysisEntries, err - } - - for _, analysisEntry := range analysisEntries { - instanceAnalysis := NewInstanceAnalysis(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) - analysisKey := instanceAnalysis.String() - - peerAnalysisCount := peerAnalysisMap[analysisKey] - if 1+peerAnalysisCount >= quorumSize { - // this node and enough other nodes in agreement - concensusAnalysisEntries = append(concensusAnalysisEntries, analysisEntry) - } - } - return concensusAnalysisEntries, nil -} - // auditInstanceAnalysisInChangelog will write down an instance's analysis in the database_instance_analysis_changelog table. // To not repeat recurring analysis code, the database_instance_last_analysis table is used, so that only changes to // analysis codes are written. @@ -881,7 +851,7 @@ func ReadReplicationAnalysisChangelog() (res [](*ReplicationAnalysisChangelog), }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err } diff --git a/go/inst/audit_dao.go b/go/inst/audit_dao.go index 9e44d3c4..e457b7cb 100644 --- a/go/inst/audit_dao.go +++ b/go/inst/audit_dao.go @@ -35,7 +35,7 @@ var syslogWriter *syslog.Writer var auditOperationCounter = metrics.NewCounter() func init() { - metrics.Register("audit.write", auditOperationCounter) + _ = metrics.Register("audit.write", auditOperationCounter) } // EnableSyslogWriter enables, if possible, writes to syslog. 
These will execute _in addition_ to normal logging @@ -60,18 +60,20 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) auditWrittenToFile := false if config.Config.AuditLogFile != "" { auditWrittenToFile = true - go func() error { + go func() { + _ = func() error { f, err := os.OpenFile(config.Config.AuditLogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0640) if err != nil { return log.Errore(err) } - defer f.Close() + defer func() { _ = f.Close() }() text := fmt.Sprintf("%s\t%s\t%s\t%d\t[%s]\t%s\t\n", time.Now().Format(log.TimeFormat), auditType, instanceKey.Hostname, instanceKey.Port, clusterName, message) if _, err = f.WriteString(text); err != nil { return log.Errore(err) } return nil + }() }() } if config.Config.AuditToBackendDB { @@ -97,7 +99,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) if syslogWriter != nil { auditWrittenToFile = true go func() { - syslogWriter.Info(logMessage) + _ = syslogWriter.Info(logMessage) }() } if !auditWrittenToFile { @@ -148,7 +150,7 @@ func ReadRecentAudit(instanceKey *InstanceKey, page int) ([]Audit, error) { }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err diff --git a/go/inst/candidate_database_instance_dao.go b/go/inst/candidate_database_instance_dao.go index 2b87e678..64d4593d 100644 --- a/go/inst/candidate_database_instance_dao.go +++ b/go/inst/candidate_database_instance_dao.go @@ -17,8 +17,6 @@ package inst import ( - "fmt" - "github.com/proxysql/golib/log" "github.com/proxysql/golib/sqlutils" @@ -33,7 +31,7 @@ func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error { } args := sqlutils.Args(candidate.Hostname, candidate.Port, string(candidate.PromotionRule), candidate.LastSuggestedString) - query := fmt.Sprintf(` + query := ` insert into candidate_database_instance ( hostname, port, @@ -44,10 +42,10 @@ func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error { ) on duplicate key update 
last_suggested=values(last_suggested), promotion_rule=values(promotion_rule) - `) + ` writeFunc := func() error { _, err := db.ExecOrchestrator(query, args...) - AuditOperation("register-candidate", candidate.Key(), string(candidate.PromotionRule)) + _ = AuditOperation("register-candidate", candidate.Key(), string(candidate.PromotionRule)) return log.Errore(err) } return ExecDBWriteFunc(writeFunc) diff --git a/go/inst/cluster_alias_dao.go b/go/inst/cluster_alias_dao.go index 5cc85627..f61452d8 100644 --- a/go/inst/cluster_alias_dao.go +++ b/go/inst/cluster_alias_dao.go @@ -255,7 +255,7 @@ func ForgetLongUnseenClusterAliases() error { if err != nil { return log.Errore(err) } - AuditOperation("forget-clustr-aliases", nil, fmt.Sprintf("Forgotten aliases: %d", rows)) + _ = AuditOperation("forget-clustr-aliases", nil, fmt.Sprintf("Forgotten aliases: %d", rows)) return err } diff --git a/go/inst/dead_instance_filter.go b/go/inst/dead_instance_filter.go index 5478c62c..57ce9526 100644 --- a/go/inst/dead_instance_filter.go +++ b/go/inst/dead_instance_filter.go @@ -45,7 +45,7 @@ var DeadInstancesFilter deadInstancesFilter var deadInstancesCounter = metrics.NewCounter() func init() { - metrics.Register("discoveries.dead_instances", deadInstancesCounter) + _ = metrics.Register("discoveries.dead_instances", deadInstancesCounter) DeadInstancesFilter.deadInstances = make(map[InstanceKey]deadInstance) DeadInstancesFilter.deadInstancesMutex = sync.RWMutex{} } diff --git a/go/inst/downtime.go b/go/inst/downtime.go index 9bad4861..8c9ce5c8 100644 --- a/go/inst/downtime.go +++ b/go/inst/downtime.go @@ -55,5 +55,5 @@ func (downtime *Downtime) Ended() bool { } func (downtime *Downtime) EndsIn() time.Duration { - return downtime.EndsAt.Sub(time.Now()) + return time.Until(downtime.EndsAt) } diff --git a/go/inst/downtime_dao.go b/go/inst/downtime_dao.go index 9411df08..356cc292 100644 --- a/go/inst/downtime_dao.go +++ b/go/inst/downtime_dao.go @@ -83,7 +83,7 @@ func BeginDowntime(downtime 
*Downtime) (err error) { if err != nil { return log.Errore(err) } - AuditOperation("begin-downtime", downtime.Key, fmt.Sprintf("owner: %s, reason: %s", downtime.Owner, downtime.Reason)) + _ = AuditOperation("begin-downtime", downtime.Key, fmt.Sprintf("owner: %s, reason: %s", downtime.Owner, downtime.Reason)) return nil } @@ -106,7 +106,7 @@ func EndDowntime(instanceKey *InstanceKey) (wasDowntimed bool, err error) { if affected, _ := res.RowsAffected(); affected > 0 { wasDowntimed = true - AuditOperation("end-downtime", instanceKey, "") + _ = AuditOperation("end-downtime", instanceKey, "") } return wasDowntimed, err } @@ -197,7 +197,7 @@ func ExpireDowntime() error { return log.Errore(err) } if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - AuditOperation("expire-downtime", nil, fmt.Sprintf("Expired %d entries", rowsAffected)) + _ = AuditOperation("expire-downtime", nil, fmt.Sprintf("Expired %d entries", rowsAffected)) } } diff --git a/go/inst/instance.go b/go/inst/instance.go index e1df9516..ee3839c1 100644 --- a/go/inst/instance.go +++ b/go/inst/instance.go @@ -270,10 +270,7 @@ func (this *Instance) IsReplicationGroupSecondary() bool { // IsBinlogServer checks whether this is any type of a binlog server (currently only maxscale) func (this *Instance) IsBinlogServer() bool { - if this.isMaxScale() { - return true - } - return false + return this.isMaxScale() } // IsOracleMySQL checks whether this is an Oracle MySQL distribution @@ -499,7 +496,7 @@ func (this *Instance) CanReplicateFromEx(other *Instance, logContext string) (bo canReplicate, err := this.CanReplicateFrom(other) if config.Config.LowerReplicaVersionAllowed && canReplicate && err != nil { - log.Warningf("%v: %v Details: %v", logContext, logPrefix, err) + _ = log.Warningf("%v: %v Details: %v", logContext, logPrefix, err) err = nil } return canReplicate, err @@ -660,6 +657,6 @@ func (this *Instance) HumanReadableDescription() string { // TabulatedDescription returns a simple tabulated string 
of various properties func (this *Instance) TabulatedDescription(separator string) string { tokens := this.descriptionTokens() - description := fmt.Sprintf("%s", strings.Join(tokens, separator)) + description := strings.Join(tokens, separator) return description } diff --git a/go/inst/instance_binlog.go b/go/inst/instance_binlog.go index 90c370dd..9e5dcc3f 100644 --- a/go/inst/instance_binlog.go +++ b/go/inst/instance_binlog.go @@ -166,7 +166,7 @@ func (this *BinlogEventCursor) nextRealEvent(recursionLevel int) (*BinlogEvent, return this.nextRealEvent(recursionLevel + 1) } for _, skipSubstring := range config.Config.SkipBinlogEventsContaining { - if strings.Index(event.Info, skipSubstring) >= 0 { + if strings.Contains(event.Info, skipSubstring) { // Recursion might go deeper here. return this.nextRealEvent(recursionLevel + 1) } diff --git a/go/inst/instance_binlog_dao.go b/go/inst/instance_binlog_dao.go index 06d104b8..c7d20c48 100644 --- a/go/inst/instance_binlog_dao.go +++ b/go/inst/instance_binlog_dao.go @@ -31,7 +31,6 @@ import ( ) const maxEmptyBinlogFiles int = 10 -const maxEventInfoDisplayLength int = 200 var instanceBinlogEntryCache *cache.Cache @@ -521,7 +520,7 @@ func SearchEntryInInstanceBinlogs(instance *Instance, entryText string, monotoni } } var resultCoordinates BinlogCoordinates - var found bool = false + var found bool resultCoordinates, found, err = SearchEntryInBinlog(pseudoGTIDRegexp, &instance.Key, currentBinlog.LogFile, entryText, monotonicPseudoGTIDEntries, minBinlogCoordinates) if err != nil { break @@ -660,7 +659,7 @@ const anonymousGTIDNextEvent = "SET @@SESSION.GTID_NEXT= 'ANONYMOUS'" // check if the event is one we want to skip. 
func specialEventToSkip(event *BinlogEvent) bool { - if event != nil && strings.Index(event.Info, anonymousGTIDNextEvent) >= 0 { + if event != nil && strings.Contains(event.Info, anonymousGTIDNextEvent) { return true } return false @@ -702,8 +701,8 @@ func GetNextBinlogCoordinatesToMatch( } var ( - beautifyCoordinatesLength int = 0 - countMatchedEvents int = 0 + beautifyCoordinatesLength = 0 + countMatchedEvents = 0 lastConsumedEventCoordinates BinlogCoordinates ) diff --git a/go/inst/instance_dao.go b/go/inst/instance_dao.go index 6358f46e..20d916cc 100644 --- a/go/inst/instance_dao.go +++ b/go/inst/instance_dao.go @@ -132,11 +132,11 @@ var writeBufferLatency = stopwatch.NewNamedStopwatch() var emptyQuotesRegexp = regexp.MustCompile(`^""$`) func init() { - metrics.Register("instance.access_denied", accessDeniedCounter) - metrics.Register("instance.read_topology", readTopologyInstanceCounter) - metrics.Register("instance.read", readInstanceCounter) - metrics.Register("instance.write", writeInstanceCounter) - writeBufferLatency.AddMany([]string{"wait", "write"}) + _ = metrics.Register("instance.access_denied", accessDeniedCounter) + _ = metrics.Register("instance.read_topology", readTopologyInstanceCounter) + _ = metrics.Register("instance.read", readInstanceCounter) + _ = metrics.Register("instance.write", writeInstanceCounter) + _ = writeBufferLatency.AddMany([]string{"wait", "write"}) writeBufferLatency.Start("wait") go initializeInstanceDao() @@ -184,7 +184,7 @@ func ExecDBWriteFunc(f func() error) error { } } m.ExecuteLatency = time.Since(m.Timestamp.Add(m.WaitLatency)) - backendWrites.Append(m) + _ = backendWrites.Append(m) <-instanceWriteChan // assume this takes no time }() res := f() @@ -216,7 +216,7 @@ func logReadTopologyInstanceError(instanceKey *InstanceKey, hint string, err err } else { msg = fmt.Sprintf("ReadTopologyInstance(%+v) %+v: %+v", *instanceKey, - strings.Replace(hint, "%", "%%", -1), // escape % + strings.ReplaceAll(hint, "%", "%%"), // 
escape % err) } return log.Errorf("%s", msg) @@ -379,7 +379,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, if !instanceKey.IsValid() { latency.Start("backend") if err := UpdateInstanceLastAttemptedCheck(instanceKey); err != nil { - log.Errorf("ReadTopologyInstanceBufferable: %+v: %v", instanceKey, err) + _ = log.Errorf("ReadTopologyInstanceBufferable: %+v: %v", instanceKey, err) } latency.Stop("backend") return instance, instanceDiscoverySkipped, fmt.Errorf("ReadTopologyInstance will not act on invalid instance key: %+v", *instanceKey) @@ -437,10 +437,10 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, } if isMaxScale110 { // Only this is supported: - db.QueryRow("select @@server_id").Scan(&instance.ServerID) + _ = db.QueryRow("select @@server_id").Scan(&instance.ServerID) } else { - db.QueryRow("select @@global.server_id").Scan(&instance.ServerID) - db.QueryRow("select @@global.server_uuid").Scan(&instance.ServerUUID) + _ = db.QueryRow("select @@global.server_id").Scan(&instance.ServerID) + _ = db.QueryRow("select @@global.server_uuid").Scan(&instance.ServerUUID) } } else { // NOT MaxScale @@ -513,7 +513,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, instance.IsDetachedMaster = instance.MasterKey.IsDetached() instance.SecondsBehindMaster = m.GetNullInt64(instance.QSP.seconds_behind_master()) if instance.SecondsBehindMaster.Valid && instance.SecondsBehindMaster.Int64 < 0 { - log.Warningf("Host: %+v, instance.SecondsBehindMaster < 0 [%+v], correcting to 0", instanceKey, instance.SecondsBehindMaster.Int64) + _ = log.Warningf("Host: %+v, instance.SecondsBehindMaster < 0 [%+v], correcting to 0", instanceKey, instance.SecondsBehindMaster.Int64) instance.SecondsBehindMaster.Int64 = 0 } // And until told otherwise: @@ -600,23 +600,24 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, err := sqlutils.QueryRowsMap(db, "show global 
variables like 'rpl_semi_sync_%'", func(m sqlutils.RowMap) error { variableName := m.GetString("Variable_name") // Learn if semi-sync plugin is loaded and what is its version - if variableName == "rpl_semi_sync_master_enabled" { + switch variableName { + case "rpl_semi_sync_master_enabled": instance.SemiSyncMasterEnabled = (m.GetString("Value") == "ON") semiSyncMasterPluginLoaded = true instance.SemiSyncMasterPluginNewVersion = false - } else if variableName == "rpl_semi_sync_source_enabled" { + case "rpl_semi_sync_source_enabled": instance.SemiSyncMasterEnabled = (m.GetString("Value") == "ON") semiSyncMasterPluginLoaded = true instance.SemiSyncMasterPluginNewVersion = true - } else if variableName == "rpl_semi_sync_slave_enabled" { + case "rpl_semi_sync_slave_enabled": instance.SemiSyncReplicaEnabled = (m.GetString("Value") == "ON") semiSyncReplicaPluginLoaded = true instance.SemiSyncReplicaPluginNewVersion = false - } else if variableName == "rpl_semi_sync_replica_enabled" { + case "rpl_semi_sync_replica_enabled": instance.SemiSyncReplicaEnabled = (m.GetString("Value") == "ON") semiSyncReplicaPluginLoaded = true instance.SemiSyncReplicaPluginNewVersion = true - } else { + default: // additional info matched, regexperr := regexp.MatchString("^rpl_semi_sync_(master|source)_timeout$", variableName) if regexperr != nil { @@ -782,7 +783,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, defer waitGroup.Done() if err := db.QueryRow(config.Config.ReplicationLagQuery).Scan(&instance.ReplicationLagSeconds); err == nil { if instance.ReplicationLagSeconds.Valid && instance.ReplicationLagSeconds.Int64 < 0 { - log.Warningf("Host: %+v, instance.SlaveLagSeconds < 0 [%+v], correcting to 0", instanceKey, instance.ReplicationLagSeconds.Int64) + _ = log.Warningf("Host: %+v, instance.SlaveLagSeconds < 0 [%+v], correcting to 0", instanceKey, instance.ReplicationLagSeconds.Int64) instance.ReplicationLagSeconds.Int64 = 0 } } else { @@ -968,7 +969,7 @@ 
func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool, if config.Config.AutoPseudoGTID { var err error instance.UsingPseudoGTID, err = isInjectedPseudoGTID(instance.ClusterName) - log.Errore(err) + _ = log.Errore(err) } else if config.Config.DetectPseudoGTIDQuery != "" { waitGroup.Add(1) go func() { @@ -1102,7 +1103,7 @@ Cleanup: redactedMasterExecutedGtidSet, _ := NewOracleGtidSet(instance.masterExecutedGtidSet) redactedMasterExecutedGtidSet.RemoveUUID(instance.MasterUUID) - db.QueryRow("select gtid_subtract(?, ?)", redactedExecutedGtidSet.String(), redactedMasterExecutedGtidSet.String()).Scan(&instance.GtidErrant) + _ = db.QueryRow("select gtid_subtract(?, ?)", redactedExecutedGtidSet.String(), redactedMasterExecutedGtidSet.String()).Scan(&instance.GtidErrant) } } } @@ -1260,7 +1261,7 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { clusterNameByCoMasterKey := instance.MasterKey.StringCode() if clusterName != clusterNameByInstanceKey && clusterName != clusterNameByCoMasterKey { // Can be caused by a co-master topology failover - log.Errorf("ReadInstanceClusterAttributes: in co-master topology %s is not in (%s, %s). Forcing it to become one of them", clusterName, clusterNameByInstanceKey, clusterNameByCoMasterKey) + _ = log.Errorf("ReadInstanceClusterAttributes: in co-master topology %s is not in (%s, %s). 
Forcing it to become one of them", clusterName, clusterNameByInstanceKey, clusterNameByCoMasterKey) clusterName = math.TernaryString(instance.Key.SmallerThan(&instance.MasterKey), clusterNameByInstanceKey, clusterNameByCoMasterKey) } if clusterName == clusterNameByInstanceKey { @@ -1319,7 +1320,7 @@ func BulkReadInstance() ([](*InstanceKey), error) { } func ReadInstancePromotionRule(instance *Instance) (err error) { - var promotionRule CandidatePromotionRule = NeutralPromoteRule + var promotionRule = NeutralPromoteRule query := ` select ifnull(nullif(promotion_rule, ''), 'neutral') as promotion_rule @@ -1421,7 +1422,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.InstanceAlias = m.GetString("instance_alias") instance.LastDiscoveryLatency = time.Duration(m.GetInt64("last_discovery_latency")) * time.Nanosecond - instance.Replicas.ReadJson(replicasJSON) + _ = instance.Replicas.ReadJson(replicasJSON) instance.applyFlavorName() /* Read Group Replication variables below */ @@ -1431,7 +1432,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.ReplicationGroupMemberRole = m.GetString("replication_group_member_role") instance.ReplicationGroupPrimaryInstanceKey = InstanceKey{Hostname: m.GetString("replication_group_primary_host"), Port: m.GetInt("replication_group_primary_port")} - instance.ReplicationGroupMembers.ReadJson(m.GetString("replication_group_members")) + _ = instance.ReplicationGroupMembers.ReadJson(m.GetString("replication_group_members")) //instance.ReplicationGroup = m.GetString("replication_group_") // problems @@ -1536,7 +1537,7 @@ func ReadInstance(instanceKey *InstanceKey) (*Instance, bool, error) { // ReadClusterInstances reads all instances of a given cluster func ReadClusterInstances(clusterName string) ([](*Instance), error) { - if strings.Index(clusterName, "'") >= 0 { + if strings.Contains(clusterName, "'") { return [](*Instance){}, log.Errorf("Invalid cluster name: %s", clusterName) } condition := `cluster_name = ?` @@ 
-2101,14 +2102,14 @@ func ReviewUnseenInstances() error { masterHostname, err := ResolveHostname(instance.MasterKey.Hostname) if err != nil { - log.Errore(err) + _ = log.Errore(err) continue } instance.MasterKey.Hostname = masterHostname savedClusterName := instance.ClusterName if err := ReadInstanceClusterAttributes(instance); err != nil { - log.Errore(err) + _ = log.Errore(err) } else if instance.ClusterName != savedClusterName { updateInstanceClusterName(instance) operations++ @@ -2345,7 +2346,7 @@ func ReadCountMySQLSnapshots(hostnames []string) (map[string]int, error) { }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err } @@ -2541,7 +2542,7 @@ func ReadAllInstanceKeys() ([]InstanceKey, error) { err := db.QueryOrchestrator(query, sqlutils.Args(), func(m sqlutils.RowMap) error { instanceKey, merr := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) if merr != nil { - log.Errore(merr) + _ = log.Errore(merr) } else if !InstanceIsForgotten(instanceKey) { // only if not in "forget" cache res = append(res, *instanceKey) @@ -2606,7 +2607,7 @@ func ReadOutdatedInstanceKeys() ([]InstanceKey, error) { err := db.QueryOrchestrator(query, args, func(m sqlutils.RowMap) error { instanceKey, merr := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) if merr != nil { - log.Errore(merr) + _ = log.Errore(merr) } else if !InstanceIsForgotten(instanceKey) { // only if not in "forget" cache res = append(res, *instanceKey) @@ -2616,7 +2617,7 @@ func ReadOutdatedInstanceKeys() ([]InstanceKey, error) { }) if err != nil { - log.Errore(err) + _ = log.Errore(err) } return res, err @@ -2634,11 +2635,11 @@ func mkInsertOdku(table string, columns []string, values []string, nrRows int, i } var q bytes.Buffer - var ignore string = "" + var ignore = "" if insertIgnore { ignore = "ignore" } - var valRow string = fmt.Sprintf("(%s)", strings.Join(values, ", ")) + var valRow = fmt.Sprintf("(%s)", strings.Join(values, ", ")) var val bytes.Buffer 
val.WriteString(valRow) for i := 1; i < nrRows; i++ { @@ -2646,7 +2647,7 @@ func mkInsertOdku(table string, columns []string, values []string, nrRows int, i val.WriteString(valRow) } - var col string = strings.Join(columns, ", ") + var col = strings.Join(columns, ", ") var odku bytes.Buffer odku.WriteString(fmt.Sprintf("%s=VALUES(%s)", columns[0], columns[0])) for _, c := range columns[1:] { @@ -2671,10 +2672,8 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo return "", nil, nil } - insertIgnore := false - if !instanceWasActuallyFound { - insertIgnore = true - } + insertIgnore := !instanceWasActuallyFound + var columns = []string{ "hostname", "port", @@ -2755,7 +2754,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "replication_group_primary_port", } - var values []string = make([]string, len(columns), len(columns)) + var values = make([]string, len(columns)) for i := range columns { values[i] = "?" } @@ -2882,7 +2881,7 @@ func writeManyInstances(instances []*Instance, instanceWasActuallyFound bool, up } if _, err := db.ExecOrchestrator(sql, args...); err != nil { if strings.Contains(err.Error(), tooManyPlaceholders) { - return fmt.Errorf("writeManyInstances(?,%v,%v): error: %+v, len(instances): %v, len(args): %v. Reduce InstanceWriteBufferSize to avoid len(args) being > 64k, a limit in the MySQL source code.", + return fmt.Errorf("writeManyInstances(?,%v,%v): error: %+v, len(instances): %v, len(args): %v. 
reduce InstanceWriteBufferSize to avoid len(args) being > 64k, a limit in the MySQL source code", instanceWasActuallyFound, updateLastSeen, err.Error(), @@ -2977,12 +2976,12 @@ func flushInstanceWriteBuffer() { } err := ExecDBWriteFunc(writeFunc) if err != nil { - log.Errorf("flushInstanceWriteBuffer: %v", err) + _ = log.Errorf("flushInstanceWriteBuffer: %v", err) } writeBufferLatency.Stop("write") - writeBufferMetrics.Append(&WriteBufferMetric{ + _ = writeBufferMetrics.Append(&WriteBufferMetric{ Timestamp: time.Now(), WaitLatency: writeBufferLatency.Elapsed("wait"), WriteLatency: writeBufferLatency.Elapsed("write"), @@ -3437,7 +3436,7 @@ func PopulateGroupReplicationInformation(instance *Instance, db *sql.DB) error { "%+v: %+v", instance.Key, err) } } - defer rows.Close() + defer func() { _ = rows.Close() }() foundGroupPrimary := false // Loop over the query results and populate GR instance attributes from the row that matches the instance being // probed. In addition, figure out the group primary and also add it as attribute of the instance. 
@@ -3461,7 +3460,7 @@ func PopulateGroupReplicationInformation(instance *Instance, db *sql.DB) error { } groupMemberKey, err := NewResolveInstanceKey(host, int(port)) if err != nil { - log.Errorf("Unable to resolve instance for group member %v:%v", host, port) + _ = log.Errorf("Unable to resolve instance for group member %v:%v", host, port) continue } // Set the replication group primary from what we find in performance_schema.replication_group_members for @@ -3479,7 +3478,7 @@ func PopulateGroupReplicationInformation(instance *Instance, db *sql.DB) error { instance.AddGroupMemberKey(groupMemberKey) // This helps us keep info on all members of the same group as the instance } } else { - log.Errorf("Unable to scan row group replication information while processing %+v, skipping the "+ + _ = log.Errorf("Unable to scan row group replication information while processing %+v, skipping the "+ "row and continuing: %+v", instance.Key, err) } } diff --git a/go/inst/instance_dao_test.go b/go/inst/instance_dao_test.go index 88bc8143..1acfa4d9 100644 --- a/go/inst/instance_dao_test.go +++ b/go/inst/instance_dao_test.go @@ -21,7 +21,7 @@ var ( ) func normalizeQuery(name string) string { - name = strings.Replace(name, "`", "", -1) + name = strings.ReplaceAll(name, "`", "") name = spacesRegexp.ReplaceAllString(name, " ") name = strings.TrimSpace(name) return name diff --git a/go/inst/instance_key.go b/go/inst/instance_key.go index b11700bf..e389bd10 100644 --- a/go/inst/instance_key.go +++ b/go/inst/instance_key.go @@ -34,7 +34,7 @@ var ( ipv4Regexp = regexp.MustCompile("^([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)$") ipv4HostPortRegexp = regexp.MustCompile("^([^:]+):([0-9]+)$") ipv4HostRegexp = regexp.MustCompile("^([^:]+)$") - ipv6HostPortRegexp = regexp.MustCompile("^\\[([:0-9a-fA-F]+)\\]:([0-9]+)$") // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 + ipv6HostPortRegexp = regexp.MustCompile(`^\[([:0-9a-fA-F]+)\]:([0-9]+)$`) // e.g. 
[2001:db8:1f70::999:de8:7648:6e8]:3308 ipv6HostRegexp = regexp.MustCompile("^([:0-9a-fA-F]+)$") // e.g. 2001:db8:1f70::999:de8:7648:6e8 ) diff --git a/go/inst/instance_key_map_test.go b/go/inst/instance_key_map_test.go index 0f907449..d652b6ba 100644 --- a/go/inst/instance_key_map_test.go +++ b/go/inst/instance_key_map_test.go @@ -63,7 +63,7 @@ func TestInstanceKeyMapToJSON(t *testing.T) { func TestInstanceKeyMapReadJSON(t *testing.T) { json := `[{"Hostname":"host1","Port":3306},{"Hostname":"host2","Port":3306}]` m := *NewInstanceKeyMap() - m.ReadJson(json) + _ = m.ReadJson(json) test.S(t).ExpectEquals(len(m), 2) test.S(t).ExpectTrue(m[key1]) test.S(t).ExpectTrue(m[key2]) diff --git a/go/inst/instance_test.go b/go/inst/instance_test.go index 7648a2c8..df1437e4 100644 --- a/go/inst/instance_test.go +++ b/go/inst/instance_test.go @@ -32,7 +32,6 @@ func init() { var instance1 = Instance{Key: key1} var instance2 = Instance{Key: key2} -var instance3 = Instance{Key: key3} func TestIsSmallerMajorVersion(t *testing.T) { i55 := Instance{Version: "5.5"} diff --git a/go/inst/instance_topology.go b/go/inst/instance_topology.go index f27e286c..7f7f3d04 100644 --- a/go/inst/instance_topology.go +++ b/go/inst/instance_topology.go @@ -35,8 +35,8 @@ type StopReplicationMethod string const ( NoStopReplication StopReplicationMethod = "NoStopReplication" - StopReplicationNormal = "StopReplicationNormal" - StopReplicationNice = "StopReplicationNice" + StopReplicationNormal StopReplicationMethod = "StopReplicationNormal" + StopReplicationNice StopReplicationMethod = "StopReplicationNice" ) var ReplicationNotRunningError = fmt.Errorf("Replication not running") @@ -262,7 +262,7 @@ func MoveEquivalent(instanceKey, otherKey *InstanceKey) (*Instance, error) { err = fmt.Errorf("MoveEquivalent(): ExecBinlogCoordinates changed after stopping replication on %+v; aborting", instance.Key) goto Cleanup } - instance, err = ChangeMasterTo(instanceKey, otherKey, binlogCoordinates, false, 
GTIDHintNeutral) + _, err = ChangeMasterTo(instanceKey, otherKey, binlogCoordinates, false, GTIDHintNeutral) Cleanup: instance, _ = StartReplication(instanceKey) @@ -299,7 +299,7 @@ func MoveUp(instanceKey *InstanceKey) (*Instance, error) { return instance, fmt.Errorf("master is not a replica itself: %+v", master.Key) } - if canReplicate, err := instance.CanReplicateFromEx(master, "MoveUp()"); canReplicate == false { + if canReplicate, err := instance.CanReplicateFromEx(master, "MoveUp()"); !canReplicate { return instance, err } if master.IsBinlogServer() { @@ -335,14 +335,14 @@ func MoveUp(instanceKey *InstanceKey) (*Instance, error) { } if !instance.UsingMariaDBGTID { - instance, err = StartReplicationUntilMasterCoordinates(instanceKey, &master.SelfBinlogCoordinates) + _, err = StartReplicationUntilMasterCoordinates(instanceKey, &master.SelfBinlogCoordinates) if err != nil { goto Cleanup } } // We can skip hostname unresolve; we just copy+paste whatever our master thinks of its master. - instance, err = ChangeMasterTo(instanceKey, &master.MasterKey, &master.ExecBinlogCoordinates, true, GTIDHintDeny) + _, err = ChangeMasterTo(instanceKey, &master.MasterKey, &master.ExecBinlogCoordinates, true, GTIDHintDeny) if err != nil { goto Cleanup } @@ -364,7 +364,7 @@ Cleanup: // MoveUpReplicas will attempt moving up all replicas of a given instance, at the same time. // Clock-time, this is fater than moving one at a time. However this means all replicas of the given instance, and the instance itself, // will all stop replicating together. 
-func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) { +func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *Instance, []error, error) { res := [](*Instance){} errs := []error{} replicaMutex := make(chan bool, 1) @@ -372,29 +372,29 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I instance, err := ReadTopologyInstance(instanceKey) if err != nil { - return res, nil, err, errs + return res, nil, errs, err } if !instance.IsReplica() { - return res, instance, fmt.Errorf("instance is not a replica: %+v", instanceKey), errs + return res, instance, errs, fmt.Errorf("instance is not a replica: %+v", instanceKey) } _, err = GetInstanceMaster(instance) if err != nil { - return res, instance, log.Errorf("Cannot GetInstanceMaster() for %+v. error=%+v", instance.Key, err), errs + return res, instance, errs, log.Errorf("Cannot GetInstanceMaster() for %+v. error=%+v", instance.Key, err) } if instance.IsBinlogServer() { - replicas, err, errors := RepointReplicasTo(instanceKey, pattern, &instance.MasterKey) + replicas, errors, err := RepointReplicasTo(instanceKey, pattern, &instance.MasterKey) // Bail out! 
- return replicas, instance, err, errors + return replicas, instance, errors, err } replicas, err := ReadReplicaInstances(instanceKey) if err != nil { - return res, instance, err, errs + return res, instance, errs, err } replicas = filterInstancesByPattern(replicas, pattern) if len(replicas) == 0 { - return res, instance, nil, errs + return res, instance, errs, nil } log.Infof("Will move replicas of %+v up the topology", *instanceKey) @@ -429,7 +429,7 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I var replicaErr error ExecuteOnTopology(func() { - if canReplicate, err := replica.CanReplicateFromEx(instance, "MoveUpReplicas()"); canReplicate == false || err != nil { + if canReplicate, err := replica.CanReplicateFromEx(instance, "MoveUpReplicas()"); !canReplicate || err != nil { replicaErr = err return } @@ -479,15 +479,15 @@ func MoveUpReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), *I Cleanup: instance, _ = StartReplication(instanceKey) if err != nil { - return res, instance, log.Errore(err), errs + return res, instance, errs, log.Errore(err) } if len(errs) == len(replicas) { // All returned with error - return res, instance, log.Error("Error on all operations"), errs + return res, instance, errs, log.Error("Error on all operations") } AuditOperation("move-up-replicas", instanceKey, fmt.Sprintf("moved up %d/%d replicas of %+v. New master: %+v", len(res), len(replicas), *instanceKey, instance.MasterKey)) - return res, instance, err, errs + return res, instance, errs, err } // MoveBelow will attempt moving instance indicated by instanceKey below its supposed sibling indicated by sinblingKey. 
@@ -555,7 +555,7 @@ func MoveBelow(instanceKey, siblingKey *InstanceKey) (*Instance, error) { goto Cleanup } if instance.ExecBinlogCoordinates.SmallerThan(&sibling.ExecBinlogCoordinates) { - instance, err = StartReplicationUntilMasterCoordinates(instanceKey, &sibling.ExecBinlogCoordinates) + _, err = StartReplicationUntilMasterCoordinates(instanceKey, &sibling.ExecBinlogCoordinates) if err != nil { goto Cleanup } @@ -567,14 +567,14 @@ func MoveBelow(instanceKey, siblingKey *InstanceKey) (*Instance, error) { } // At this point both siblings have executed exact same statements and are identical - instance, err = ChangeMasterTo(instanceKey, &sibling.Key, &sibling.SelfBinlogCoordinates, false, GTIDHintDeny) + _, err = ChangeMasterTo(instanceKey, &sibling.Key, &sibling.SelfBinlogCoordinates, false, GTIDHintDeny) if err != nil { goto Cleanup } Cleanup: instance, _ = StartReplication(instanceKey) - sibling, _ = StartReplication(siblingKey) + _, _ = StartReplication(siblingKey) if err != nil { return instance, log.Errore(err) @@ -648,7 +648,7 @@ func moveInstanceBelowViaGTID(instance, otherInstance *Instance) (*Instance, err defer EndMaintenance(maintenanceToken) } - instance, err = StopReplication(instanceKey) + _, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -688,12 +688,12 @@ func MoveBelowGTID(instanceKey, otherKey *InstanceKey) (*Instance, error) { // moveReplicasViaGTID moves a list of replicas under another instance via GTID, returning those replicas // that could not be moved (do not use GTID or had GTID errors) -func moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunctionsContainer *PostponedFunctionsContainer) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), err error, errs []error) { +func moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunctionsContainer *PostponedFunctionsContainer) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), errs []error, err error) { replicas = 
RemoveNilInstances(replicas) replicas = RemoveInstance(replicas, &other.Key) if len(replicas) == 0 { // Nothing to do - return movedReplicas, unmovedReplicas, nil, errs + return movedReplicas, unmovedReplicas, errs, nil } log.Infof("moveReplicasViaGTID: Will move %+v replicas below %+v via GTID, max concurrency: %v", @@ -747,37 +747,37 @@ func moveReplicasViaGTID(replicas [](*Instance), other *Instance, postponedFunct if len(errs) == len(replicas) { // All returned with error - return movedReplicas, unmovedReplicas, fmt.Errorf("moveReplicasViaGTID: Error on all %+v operations", len(errs)), errs + return movedReplicas, unmovedReplicas, errs, fmt.Errorf("moveReplicasViaGTID: Error on all %+v operations", len(errs)) } AuditOperation("move-replicas-gtid", &other.Key, fmt.Sprintf("moved %d/%d replicas below %+v via GTID", len(movedReplicas), len(replicas), other.Key)) - return movedReplicas, unmovedReplicas, err, errs + return movedReplicas, unmovedReplicas, errs, err } // MoveReplicasGTID will (attempt to) move all replicas of given master below given instance. 
-func MoveReplicasGTID(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), err error, errs []error) { +func MoveReplicasGTID(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) (movedReplicas [](*Instance), unmovedReplicas [](*Instance), errs []error, err error) { belowInstance, err := ReadTopologyInstance(belowKey) if err != nil { // Can't access "below" ==> can't move replicas beneath it - return movedReplicas, unmovedReplicas, err, errs + return movedReplicas, unmovedReplicas, errs, err } // replicas involved replicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(masterKey) if err != nil { - return movedReplicas, unmovedReplicas, err, errs + return movedReplicas, unmovedReplicas, errs, err } replicas = filterInstancesByPattern(replicas, pattern) - movedReplicas, unmovedReplicas, err, errs = moveReplicasViaGTID(replicas, belowInstance, nil) + movedReplicas, unmovedReplicas, errs, err = moveReplicasViaGTID(replicas, belowInstance, nil) if err != nil { - log.Errore(err) + _ = log.Errore(err) } if len(unmovedReplicas) > 0 { err = fmt.Errorf("MoveReplicasGTID: only moved %d out of %d replicas of %+v; error is: %+v", len(movedReplicas), len(replicas), *masterKey, err) } - return movedReplicas, unmovedReplicas, err, errs + return movedReplicas, unmovedReplicas, errs, err } // Repoint connects a replica to a master using its exact same executing coordinates. 
@@ -844,7 +844,7 @@ func Repoint(instanceKey *InstanceKey, masterKey *InstanceKey, gtidHint Operatio if instance.ExecBinlogCoordinates.IsEmpty() { instance.ExecBinlogCoordinates.LogFile = "orchestrator-unknown-log-file" } - instance, err = ChangeMasterTo(instanceKey, masterKey, &instance.ExecBinlogCoordinates, !masterIsAccessible, gtidHint) + _, err = ChangeMasterTo(instanceKey, masterKey, &instance.ExecBinlogCoordinates, !masterIsAccessible, gtidHint) if err != nil { goto Cleanup } @@ -863,17 +863,17 @@ Cleanup: // RepointTo repoints list of replicas onto another master. // Binlog Server is the major use case -func RepointTo(replicas [](*Instance), belowKey *InstanceKey) ([](*Instance), error, []error) { +func RepointTo(replicas [](*Instance), belowKey *InstanceKey) ([](*Instance), []error, error) { res := [](*Instance){} errs := []error{} replicas = RemoveInstance(replicas, belowKey) if len(replicas) == 0 { // Nothing to do - return res, nil, errs + return res, errs, nil } if belowKey == nil { - return res, log.Errorf("RepointTo received nil belowKey"), errs + return res, errs, log.Errorf("RepointTo received nil belowKey") } log.Infof("Will repoint %+v replicas below %+v", len(replicas), *belowKey) @@ -907,28 +907,28 @@ func RepointTo(replicas [](*Instance), belowKey *InstanceKey) ([](*Instance), er if len(errs) == len(replicas) { // All returned with error - return res, log.Error("Error on all operations"), errs + return res, errs, log.Error("Error on all operations") } AuditOperation("repoint-to", belowKey, fmt.Sprintf("repointed %d/%d replicas to %+v", len(res), len(replicas), *belowKey)) - return res, nil, errs + return res, errs, nil } // RepointReplicasTo repoints replicas of a given instance (possibly filtered) onto another master. 
// Binlog Server is the major use case -func RepointReplicasTo(instanceKey *InstanceKey, pattern string, belowKey *InstanceKey) ([](*Instance), error, []error) { +func RepointReplicasTo(instanceKey *InstanceKey, pattern string, belowKey *InstanceKey) ([](*Instance), []error, error) { res := [](*Instance){} errs := []error{} replicas, err := ReadReplicaInstances(instanceKey) if err != nil { - return res, err, errs + return res, errs, err } replicas = RemoveInstance(replicas, belowKey) replicas = filterInstancesByPattern(replicas, pattern) if len(replicas) == 0 { // Nothing to do - return res, nil, errs + return res, errs, nil } if belowKey == nil { // Default to existing master. All replicas are of the same master, hence just pick one. @@ -939,7 +939,7 @@ func RepointReplicasTo(instanceKey *InstanceKey, pattern string, belowKey *Insta } // RepointReplicas repoints all replicas of a given instance onto its existing master. -func RepointReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), error, []error) { +func RepointReplicas(instanceKey *InstanceKey, pattern string) ([](*Instance), []error, error) { return RepointReplicasTo(instanceKey, pattern, nil) } @@ -996,7 +996,7 @@ func MakeCoMaster(instanceKey *InstanceKey) (*Instance, error) { } log.Infof("Will make %+v co-master of %+v", instanceKey, master.Key) - var gitHint OperationGTIDHint = GTIDHintNeutral + var gitHint = GTIDHintNeutral if maintenanceToken, merr := BeginMaintenance(instanceKey, GetMaintenanceOwner(), fmt.Sprintf("make co-master of %+v", master.Key)); merr != nil { err = fmt.Errorf("Cannot begin maintenance on %+v: %v", *instanceKey, merr) goto Cleanup @@ -1080,7 +1080,7 @@ func ResetReplicationOperation(instanceKey *InstanceKey) (*Instance, error) { } if instance.IsReplica() { - instance, err = StopReplication(instanceKey) + _, err = StopReplication(instanceKey) if err != nil { goto Cleanup } @@ -1369,7 +1369,7 @@ func ErrantGTIDResetMaster(instanceKey *InstanceKey) (instance 
*Instance, err er goto Cleanup } if !masterStatusFound { - err = fmt.Errorf("gtid-errant-reset-master: cannot get master status on %+v, after which intended to set gtid_purged to: %s.", instance.Key, gtidSubtract) + err = fmt.Errorf("gtid-errant-reset-master: cannot get master status on %+v, after which intended to set gtid_purged to: %s", instance.Key, gtidSubtract) goto Cleanup } if executedGtidSet != "" { @@ -1393,7 +1393,7 @@ func ErrantGTIDResetMaster(instanceKey *InstanceKey) (instance *Instance, err er Cleanup: var startReplicationErr error instance, startReplicationErr = StartReplication(instanceKey) - log.Errore(startReplicationErr) + _ = log.Errore(startReplicationErr) if err != nil { return instance, log.Errore(err) @@ -1700,7 +1700,7 @@ func MakeMaster(instanceKey *InstanceKey) (*Instance, error) { defer EndMaintenance(maintenanceToken) } - _, _, err, _ = MultiMatchBelow(siblings, instanceKey, nil) + _, _, _, err = MultiMatchBelow(siblings, instanceKey, nil) if err != nil { goto Cleanup } @@ -1728,7 +1728,7 @@ func TakeSiblings(instanceKey *InstanceKey) (instance *Instance, takenSiblings i if !instance.IsReplica() { return instance, takenSiblings, log.Errorf("take-siblings: instance %+v is not a replica.", *instanceKey) } - relocatedReplicas, _, err, _ := RelocateReplicas(&instance.MasterKey, instanceKey, "") + relocatedReplicas, _, _, err := RelocateReplicas(&instance.MasterKey, instanceKey, "") return instance, len(relocatedReplicas), err } @@ -1748,8 +1748,8 @@ func TakeMasterHook(successor *Instance, demoted *Instance) { env = append(env, fmt.Sprintf("ORC_SUCCESSOR_HOST=%s", successorKey)) env = append(env, fmt.Sprintf("ORC_FAILED_HOST=%s", demotedKey)) - successorStr := fmt.Sprintf("%s", successorKey) - demotedStr := fmt.Sprintf("%s", demotedKey) + successorStr := successorKey.String() + demotedStr := demotedKey.String() processCount := len(config.Config.PostTakeMasterProcesses) for i, command := range config.Config.PostTakeMasterProcesses { @@ 
-1761,7 +1761,7 @@ func TakeMasterHook(successor *Instance, demoted *Instance) { log.Infof("Take-Master: %s", info) } else { info := fmt.Sprintf("Execution of PostTakeMasterProcesses failed in %v with error: %v", time.Since(start), err) - log.Errorf("Take-Master: %s", info) + _ = log.Errorf("Take-Master: %s", info) } } @@ -1787,11 +1787,11 @@ func TakeMaster(instanceKey *InstanceKey, allowTakingCoMaster bool) (*Instance, return instance, err } if masterInstance.IsCoMaster && !allowTakingCoMaster { - return instance, fmt.Errorf("%+v is co-master. Cannot take it.", masterInstance.Key) + return instance, fmt.Errorf("%+v is co-master, cannot take it", masterInstance.Key) } log.Debugf("TakeMaster: will attempt making %+v take its master %+v, now resolved as %+v", *instanceKey, instance.MasterKey, masterInstance.Key) - if canReplicate, err := masterInstance.CanReplicateFromEx(instance, "TakeMaster()"); canReplicate == false { + if canReplicate, err := masterInstance.CanReplicateFromEx(instance, "TakeMaster()"); !canReplicate { return instance, err } @@ -1885,7 +1885,7 @@ func MakeLocalMaster(instanceKey *InstanceKey) (*Instance, error) { goto Cleanup } - _, _, err, _ = MultiMatchBelow(siblings, instanceKey, nil) + _, _, _, err = MultiMatchBelow(siblings, instanceKey, nil) if err != nil { goto Cleanup } @@ -1958,16 +1958,16 @@ func GetSortedReplicas(masterKey *InstanceKey, stopReplicationMethod StopReplica // MultiMatchBelow will efficiently match multiple replicas below a given instance. 
// It is assumed that all given replicas are siblings -func MultiMatchBelow(replicas [](*Instance), belowKey *InstanceKey, postponedFunctionsContainer *PostponedFunctionsContainer) (matchedReplicas [](*Instance), belowInstance *Instance, err error, errs []error) { +func MultiMatchBelow(replicas [](*Instance), belowKey *InstanceKey, postponedFunctionsContainer *PostponedFunctionsContainer) (matchedReplicas [](*Instance), belowInstance *Instance, errs []error, err error) { belowInstance, found, err := ReadInstance(belowKey) if err != nil || !found { - return matchedReplicas, belowInstance, err, errs + return matchedReplicas, belowInstance, errs, err } replicas = RemoveInstance(replicas, belowKey) if len(replicas) == 0 { // Nothing to do - return replicas, belowInstance, err, errs + return replicas, belowInstance, errs, err } log.Infof("Will match %+v replicas below %+v via Pseudo-GTID, independently", len(replicas), belowKey) @@ -2007,27 +2007,27 @@ func MultiMatchBelow(replicas [](*Instance), belowKey *InstanceKey, postponedFun } if len(errs) == len(replicas) { // All returned with error - return matchedReplicas, belowInstance, fmt.Errorf("MultiMatchBelowIndependently: Error on all %+v operations", len(errs)), errs + return matchedReplicas, belowInstance, errs, fmt.Errorf("MultiMatchBelowIndependently: Error on all %+v operations", len(errs)) } AuditOperation("multi-match-below-independent", belowKey, fmt.Sprintf("matched %d/%d replicas below %+v via Pseudo-GTID", len(matchedReplicas), len(replicas), belowKey)) - return matchedReplicas, belowInstance, err, errs + return matchedReplicas, belowInstance, errs, err } // MultiMatchReplicas will match (via pseudo-gtid) all replicas of given master below given instance. 
-func MultiMatchReplicas(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) { +func MultiMatchReplicas(masterKey *InstanceKey, belowKey *InstanceKey, pattern string) ([](*Instance), *Instance, []error, error) { res := [](*Instance){} errs := []error{} belowInstance, err := ReadTopologyInstance(belowKey) if err != nil { // Can't access "below" ==> can't match replicas beneath it - return res, nil, err, errs + return res, nil, errs, err } masterInstance, found, err := ReadInstance(masterKey) if err != nil || !found { - return res, nil, err, errs + return res, nil, errs, err } // See if we have a binlog server case (special handling): @@ -2046,9 +2046,9 @@ func MultiMatchReplicas(masterKey *InstanceKey, belowKey *InstanceKey, pattern s binlogCase = true } if binlogCase { - replicas, err, errors := RepointReplicasTo(masterKey, pattern, belowKey) + replicas, errors, err := RepointReplicasTo(masterKey, pattern, belowKey) // Bail out! - return replicas, masterInstance, err, errors + return replicas, masterInstance, errors, err } // Not binlog server @@ -2056,17 +2056,17 @@ func MultiMatchReplicas(masterKey *InstanceKey, belowKey *InstanceKey, pattern s // replicas involved replicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(masterKey) if err != nil { - return res, belowInstance, err, errs + return res, belowInstance, errs, err } replicas = filterInstancesByPattern(replicas, pattern) - matchedReplicas, belowInstance, err, errs := MultiMatchBelow(replicas, &belowInstance.Key, nil) + matchedReplicas, belowInstance, errs, err := MultiMatchBelow(replicas, &belowInstance.Key, nil) if len(matchedReplicas) != len(replicas) { err = fmt.Errorf("MultiMatchReplicas: only matched %d out of %d replicas of %+v; error is: %+v", len(matchedReplicas), len(replicas), *masterKey, err) } AuditOperation("multi-match-replicas", masterKey, fmt.Sprintf("matched %d replicas under %+v", len(matchedReplicas), *belowKey)) - 
return matchedReplicas, belowInstance, err, errs + return matchedReplicas, belowInstance, errs, err } // MatchUp will move a replica up the replication chain, so that it becomes sibling of its master, via Pseudo-GTID @@ -2098,13 +2098,13 @@ func MatchUp(instanceKey *InstanceKey, requireInstanceMaintenance bool) (*Instan // MatchUpReplicas will move all replicas of given master up the replication chain, // so that they become siblings of their master. // This should be called when the local master dies, and all its replicas are to be resurrected via Pseudo-GTID -func MatchUpReplicas(masterKey *InstanceKey, pattern string) ([](*Instance), *Instance, error, []error) { +func MatchUpReplicas(masterKey *InstanceKey, pattern string) ([](*Instance), *Instance, []error, error) { res := [](*Instance){} errs := []error{} masterInstance, found, err := ReadInstance(masterKey) if err != nil || !found { - return res, nil, err, errs + return res, nil, errs, err } return MultiMatchReplicas(masterKey, &masterInstance.MasterKey, pattern) @@ -2250,7 +2250,7 @@ func chooseCandidateReplica(replicas [](*Instance)) (candidateReplica *Instance, // lost due to inability to replicate cannotReplicateReplicas = append(cannotReplicateReplicas, replica) if err != nil { - log.Errorf("chooseCandidateReplica(): error checking CanReplicateFrom(). replica: %v; error: %v", replica.Key, err) + _ = log.Errorf("chooseCandidateReplica(): error checking CanReplicateFrom(). 
replica: %v; error: %v", replica.Key, err) } } else if replica.ExecBinlogCoordinates.SmallerThan(&candidateReplica.ExecBinlogCoordinates) { laterReplicas = append(laterReplicas, replica) @@ -2295,7 +2295,7 @@ func GetCandidateReplica(masterKey *InstanceKey, forRematchPurposes bool) (*Inst if candidateReplica != nil { mostUpToDateReplica := replicas[0] if candidateReplica.ExecBinlogCoordinates.SmallerThan(&mostUpToDateReplica.ExecBinlogCoordinates) { - log.Warningf("GetCandidateReplica: chosen replica: %+v is behind most-up-to-date replica: %+v", candidateReplica.Key, mostUpToDateReplica.Key) + _ = log.Warningf("GetCandidateReplica: chosen replica: %+v is behind most-up-to-date replica: %+v", candidateReplica.Key, mostUpToDateReplica.Key) } } log.Debugf("GetCandidateReplica: candidate: %+v, ahead: %d, equal: %d, late: %d, break: %d", candidateReplica.Key, len(aheadReplicas), len(equalReplicas), len(laterReplicas), len(cannotReplicateReplicas)) @@ -2381,7 +2381,7 @@ func RegroupReplicasPseudoGTID( log.Debugf("RegroupReplicas: multi matching %d later replicas", len(laterReplicas)) // As for the laterReplicas, we'll have to apply pseudo GTID - laterReplicas, candidateReplica, err, _ = MultiMatchBelow(laterReplicas, &candidateReplica.Key, postponedFunctionsContainer) + laterReplicas, candidateReplica, _, err = MultiMatchBelow(laterReplicas, &candidateReplica.Key, postponedFunctionsContainer) operatedReplicas := append(equalReplicas, candidateReplica) operatedReplicas = append(operatedReplicas, laterReplicas...) 
@@ -2551,7 +2551,7 @@ func RegroupReplicasGTID( moveGTIDFunc := func() error { log.Debugf("RegroupReplicasGTID: working on %d replicas", len(replicasToMove)) - movedReplicas, unmovedReplicas, err, _ = moveReplicasViaGTID(replicasToMove, candidateReplica, postponedFunctionsContainer) + movedReplicas, unmovedReplicas, _, err = moveReplicasViaGTID(replicasToMove, candidateReplica, postponedFunctionsContainer) unmovedReplicas = append(unmovedReplicas, aheadReplicas...) return log.Errore(err) } @@ -2587,7 +2587,7 @@ func RegroupReplicasBinlogServers(masterKey *InstanceKey, returnReplicaEvenOnFai return resultOnError(err) } - repointedBinlogServers, err, _ = RepointTo(binlogServerReplicas, &promotedBinlogServer.Key) + repointedBinlogServers, _, err = RepointTo(binlogServerReplicas, &promotedBinlogServer.Key) if err != nil { return resultOnError(err) @@ -2651,7 +2651,7 @@ func RegroupReplicas(masterKey *InstanceKey, returnReplicaEvenOnFailureToRegroup return RegroupReplicasPseudoGTID(masterKey, returnReplicaEvenOnFailureToRegroup, onCandidateReplicaChosen, postponedFunctionsContainer, nil) } // And, as last resort, we do PseudoGTID & binlog servers - log.Warningf("RegroupReplicas: unsure what method to invoke for %+v; trying Pseudo-GTID+Binlog Servers", *masterKey) + _ = log.Warningf("RegroupReplicas: unsure what method to invoke for %+v; trying Pseudo-GTID+Binlog Servers", *masterKey) return RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(masterKey, returnReplicaEvenOnFailureToRegroup, onCandidateReplicaChosen, postponedFunctionsContainer, nil) } @@ -2785,7 +2785,7 @@ func RelocateBelow(instanceKey, otherKey *InstanceKey) (*Instance, error) { // replicas of an instance below another. // It may choose to use Pseudo-GTID, or normal binlog positions, or take advantage of binlog servers, // or it may combine any of the above in a multi-step operation. 
-func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) ([](*Instance), error, []error) { +func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) ([](*Instance), []error, error) { errs := []error{} var err error // simplest: @@ -2810,11 +2810,11 @@ func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) // Relocate to binlog server's parent (recursive call), then repoint down otherMaster, found, err := ReadInstance(&other.MasterKey) if err != nil || !found { - return nil, err, errs + return nil, errs, err } - replicas, err, errs = relocateReplicasInternal(replicas, instance, otherMaster) + replicas, errs, err = relocateReplicasInternal(replicas, instance, otherMaster) if err != nil { - return replicas, err, errs + return replicas, errs, err } return RepointTo(replicas, &other.Key) @@ -2822,11 +2822,11 @@ func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) // GTID gtidErrorsMsg := "" { - movedReplicas, unmovedReplicas, err, errs := moveReplicasViaGTID(replicas, other, nil) + movedReplicas, unmovedReplicas, errs, err := moveReplicasViaGTID(replicas, other, nil) if len(movedReplicas) == len(replicas) { // Moved (or tried moving) everything via GTID - return movedReplicas, err, errs + return movedReplicas, errs, err } else if len(movedReplicas) > 0 { // something was moved via GTID; let's try further on return relocateReplicasInternal(unmovedReplicas, instance, other) @@ -2856,13 +2856,8 @@ func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) pseudoGTIDReplicas = append(pseudoGTIDReplicas, replica) } } - pseudoGTIDReplicas, _, err, errs = MultiMatchBelow(pseudoGTIDReplicas, &other.Key, nil) - return pseudoGTIDReplicas, err, errs - } - - // Normal binlog file:pos - if InstanceIsMasterOf(other, instance) { - // MoveUpReplicas -- but not supporting "replicas" argument at this time. 
+ pseudoGTIDReplicas, _, errs, err = MultiMatchBelow(pseudoGTIDReplicas, &other.Key, nil) + return pseudoGTIDReplicas, errs, err } // Too complex @@ -2870,44 +2865,44 @@ func relocateReplicasInternal(replicas [](*Instance), instance, other *Instance) if len(gtidErrorsMsg) > 0 { gtidErrorsMsg = "Additional Errors: " + gtidErrorsMsg } - return nil, log.Errorf("Relocating %+v replicas of %+v below %+v turns to be too complex; please do it manually. %v", len(replicas), instance.Key, other.Key, gtidErrorsMsg), errs + return nil, errs, log.Errorf("Relocating %+v replicas of %+v below %+v turns to be too complex; please do it manually. %v", len(replicas), instance.Key, other.Key, gtidErrorsMsg) } // RelocateReplicas will attempt moving replicas of an instance indicated by instanceKey below another instance. // Orchestrator will try and figure out the best way to relocate the servers. This could span normal // binlog-position, pseudo-gtid, repointing, binlog servers... -func RelocateReplicas(instanceKey, otherKey *InstanceKey, pattern string) (replicas [](*Instance), other *Instance, err error, errs []error) { +func RelocateReplicas(instanceKey, otherKey *InstanceKey, pattern string) (replicas [](*Instance), other *Instance, errs []error, err error) { instance, found, err := ReadInstance(instanceKey) if err != nil || !found { - return replicas, other, log.Errorf("Error reading %+v", *instanceKey), errs + return replicas, other, errs, log.Errorf("Error reading %+v", *instanceKey) } other, found, err = ReadInstance(otherKey) if err != nil || !found { - return replicas, other, log.Errorf("Error reading %+v", *otherKey), errs + return replicas, other, errs, log.Errorf("Error reading %+v", *otherKey) } replicas, err = ReadReplicaInstances(instanceKey) if err != nil { - return replicas, other, err, errs + return replicas, other, errs, err } replicas = RemoveInstance(replicas, otherKey) replicas = filterInstancesByPattern(replicas, pattern) if len(replicas) == 0 { // Nothing to 
do - return replicas, other, nil, errs + return replicas, other, errs, nil } for _, replica := range replicas { if other.IsDescendantOf(replica) { - return replicas, other, log.Errorf("relocate-replicas: %+v is a descendant of %+v", *otherKey, replica.Key), errs + return replicas, other, errs, log.Errorf("relocate-replicas: %+v is a descendant of %+v", *otherKey, replica.Key) } } - replicas, err, errs = relocateReplicasInternal(replicas, instance, other) + replicas, errs, err = relocateReplicasInternal(replicas, instance, other) if err == nil { AuditOperation("relocate-replicas", instanceKey, fmt.Sprintf("relocated %+v replicas of %+v below %+v", len(replicas), *instanceKey, *otherKey)) } - return replicas, other, err, errs + return replicas, other, errs, err } // PurgeBinaryLogsTo attempts to 'PURGE BINARY LOGS' until given binary log is reached diff --git a/go/inst/instance_topology_dao.go b/go/inst/instance_topology_dao.go index 67a9a4be..5572e636 100644 --- a/go/inst/instance_topology_dao.go +++ b/go/inst/instance_topology_dao.go @@ -42,8 +42,8 @@ type OperationGTIDHint string const ( GTIDHintDeny OperationGTIDHint = "NoGTID" - GTIDHintNeutral = "GTIDHintNeutral" - GTIDHintForce = "GTIDHintForce" + GTIDHintNeutral OperationGTIDHint = "GTIDHintNeutral" + GTIDHintForce OperationGTIDHint = "GTIDHintForce" ) const ( @@ -171,7 +171,7 @@ func GetReplicationRestartPreserveStatements(instanceKey *InstanceKey, injectedS // FlushBinaryLogs attempts a 'FLUSH BINARY LOGS' statement on the given instance. 
func FlushBinaryLogs(instanceKey *InstanceKey, count int) (*Instance, error) { if *config.RuntimeCLIFlags.Noop { - return nil, fmt.Errorf("noop: aborting flush-binary-logs operation on %+v; signalling error but nothing went wrong.", *instanceKey) + return nil, fmt.Errorf("noop: aborting flush-binary-logs operation on %+v; signalling error but nothing went wrong", *instanceKey) } for i := 0; i < count; i++ { @@ -598,7 +598,7 @@ func MaybeDisableSemiSyncMaster(replicaInstance *Instance) (*Instance, error) { log.Infof("semi-sync: %s: setting rpl_semi_sync_master_enabled: %t", &replicaInstance.Key, false) replicaInstance, err := SetSemiSyncMaster(&replicaInstance.Key, false) if err != nil { - log.Warningf("semi-sync: %s: cannot disable rpl_semi_sync_master_enabled; that's not that bad though", &replicaInstance.Key) + _ = log.Warningf("semi-sync: %s: cannot disable rpl_semi_sync_master_enabled; that's not that bad though", &replicaInstance.Key) } return replicaInstance, err } @@ -1163,7 +1163,7 @@ func injectEmptyGTIDTransaction(instanceKey *InstanceKey, gtidEntry *OracleGtidS if err != nil { return err } - defer conn.Close() + defer func() { _ = conn.Close() }() if _, err := conn.ExecContext(ctx, fmt.Sprintf(`SET GTID_NEXT="%s"`, gtidEntry.String())); err != nil { return err @@ -1480,7 +1480,7 @@ func CheckAndInjectPseudoGTIDOnWriter(instance *Instance) (injected bool, err er } if !canInject { if util.ClearToLog("CheckAndInjectPseudoGTIDOnWriter", instance.Key.StringCode()) { - log.Warningf("AutoPseudoGTID enabled, but orchestrator has no privileges on %+v to inject pseudo-gtid", instance.Key) + _ = log.Warningf("AutoPseudoGTID enabled, but orchestrator has no privileges on %+v to inject pseudo-gtid", instance.Key) } return injected, nil diff --git a/go/inst/oracle_gtid_set_entry.go b/go/inst/oracle_gtid_set_entry.go index 59fb3edc..3a90f787 100644 --- a/go/inst/oracle_gtid_set_entry.go +++ b/go/inst/oracle_gtid_set_entry.go @@ -78,7 +78,7 @@ func 
ParseOracleGtidSetEntry(gtidRangeString string) (*OracleGtidSetEntry, error
 	uuid := gtid_str[0]

 	// Split the non-UUID parts into multiple blocks
-	s := strings.SplitN(gtid_str[1], ":", -1)
+	s := strings.Split(gtid_str[1], ":")

 	var default_iv string // Default interval
 	var tag_ivs []tagInterval // Full tagged interval
diff --git a/go/inst/pool_dao.go b/go/inst/pool_dao.go
index 7e1ed150..df5e196a 100644
--- a/go/inst/pool_dao.go
+++ b/go/inst/pool_dao.go
@@ -34,17 +34,22 @@ func writePoolInstances(pool string, instanceKeys [](*InstanceKey)) error {
 	}
 	tx, err := dbh.Begin()
+	if err != nil {
+		return log.Errore(err)
+	}
 	if _, err := tx.Exec(`delete from database_instance_pool where pool = ?`, pool); err != nil {
-		tx.Rollback()
+		_ = tx.Rollback()
 		return log.Errore(err)
 	}
 	query := `insert into database_instance_pool (hostname, port, pool, registered_at) values (?, ?, ?, now())`
 	for _, instanceKey := range instanceKeys {
 		if _, err := tx.Exec(query, instanceKey.Hostname, instanceKey.Port, pool); err != nil {
-			tx.Rollback()
+			_ = tx.Rollback()
 			return log.Errore(err)
 		}
 	}
-	tx.Commit()
+	if err := tx.Commit(); err != nil {
+		return log.Errore(err)
+	}
 	return nil
 }
diff --git a/go/inst/replication_thread_state.go b/go/inst/replication_thread_state.go
index 2259aa74..4a63d2de 100644
--- a/go/inst/replication_thread_state.go
+++ b/go/inst/replication_thread_state.go
@@ -20,9 +20,9 @@ type ReplicationThreadState int

 const (
 	ReplicationThreadStateNoThread ReplicationThreadState = -1
-	ReplicationThreadStateStopped  = 0
-	ReplicationThreadStateRunning  = 1
-	ReplicationThreadStateOther    = 2
+	ReplicationThreadStateStopped  ReplicationThreadState = 0
+	ReplicationThreadStateRunning  ReplicationThreadState = 1
+	ReplicationThreadStateOther    ReplicationThreadState = 2
 )

 func ReplicationThreadStateFromStatus(status string) ReplicationThreadState {
diff --git a/go/inst/resolve.go b/go/inst/resolve.go
index 39d4ad9b..b6577b18 100644
--- a/go/inst/resolve.go
+++ b/go/inst/resolve.go
@@ -159,7 +159,7 @@ func ResolveHostname(hostname
string) (string, error) { if config.Config.RejectHostnameResolvePattern != "" { // Reject, don't even cache if matched, _ := regexp.MatchString(config.Config.RejectHostnameResolvePattern, resolvedHostname); matched { - log.Warningf("ResolveHostname: %+v resolved to %+v but rejected due to RejectHostnameResolvePattern '%+v'", hostname, resolvedHostname, config.Config.RejectHostnameResolvePattern) + _ = log.Warningf("ResolveHostname: %+v resolved to %+v but rejected due to RejectHostnameResolvePattern '%+v'", hostname, resolvedHostname, config.Config.RejectHostnameResolvePattern) return hostname, nil } } diff --git a/go/inst/resolve_dao.go b/go/inst/resolve_dao.go index 5a8e6bd5..95c82f2b 100644 --- a/go/inst/resolve_dao.go +++ b/go/inst/resolve_dao.go @@ -31,11 +31,11 @@ var readUnresolvedHostnameCounter = metrics.NewCounter() var readAllResolvedHostnamesCounter = metrics.NewCounter() func init() { - metrics.Register("resolve.write_resolved", writeResolvedHostnameCounter) - metrics.Register("resolve.write_unresolved", writeUnresolvedHostnameCounter) - metrics.Register("resolve.read_resolved", readResolvedHostnameCounter) - metrics.Register("resolve.read_unresolved", readUnresolvedHostnameCounter) - metrics.Register("resolve.read_resolved_all", readAllResolvedHostnamesCounter) + _ = metrics.Register("resolve.write_resolved", writeResolvedHostnameCounter) + _ = metrics.Register("resolve.write_unresolved", writeUnresolvedHostnameCounter) + _ = metrics.Register("resolve.read_resolved", readResolvedHostnameCounter) + _ = metrics.Register("resolve.read_unresolved", readUnresolvedHostnameCounter) + _ = metrics.Register("resolve.read_resolved_all", readAllResolvedHostnamesCounter) } // WriteResolvedHostname stores a hostname and the resolved hostname to backend database @@ -77,7 +77,7 @@ func WriteResolvedHostname(hostname string, resolvedHostname string) error { // ReadResolvedHostname returns the resolved hostname given a hostname, or empty if not exists func 
ReadResolvedHostname(hostname string) (string, error) { - var resolvedHostname string = "" + var resolvedHostname = "" query := ` select @@ -182,33 +182,6 @@ func readUnresolvedHostname(hostname string) (string, error) { return unresolvedHostname, err } -// readMissingHostnamesToResolve gets those (unresolved, e.g. VIP) hostnames that *should* be present in -// the hostname_resolve table, but aren't. -func readMissingKeysToResolve() (result InstanceKeyMap, err error) { - query := ` - select - hostname_unresolve.unresolved_hostname, - database_instance.port - from - database_instance - join hostname_unresolve on (database_instance.hostname = hostname_unresolve.hostname) - left join hostname_resolve on (database_instance.hostname = hostname_resolve.resolved_hostname) - where - hostname_resolve.hostname is null - ` - - err = db.QueryOrchestratorRowsMap(query, func(m sqlutils.RowMap) error { - instanceKey := InstanceKey{Hostname: m.GetString("unresolved_hostname"), Port: m.GetInt("port")} - result.AddKey(instanceKey) - return nil - }) - - if err != nil { - log.Errore(err) - } - return result, err -} - // WriteHostnameUnresolve upserts an entry in hostname_unresolve func WriteHostnameUnresolve(instanceKey *InstanceKey, unresolvedHostname string) error { writeFunc := func() error { diff --git a/go/inst/tag.go b/go/inst/tag.go index 5d54d46c..c1a94a3f 100644 --- a/go/inst/tag.go +++ b/go/inst/tag.go @@ -45,7 +45,7 @@ func NewTag(tagName string, tagValue string) (*Tag, error) { } func ParseTag(tagString string) (*Tag, error) { - tagString = strings.Replace(tagString, "!", "~", -1) + tagString = strings.ReplaceAll(tagString, "!", "~") tagString = strings.TrimSpace(tagString) if submatch := negateTagEqualsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { @@ -80,7 +80,7 @@ func (tag *Tag) String() string { func (tag *Tag) Display() string { if tag.TagValue == "" { - return fmt.Sprintf("%s", tag.TagName) + return tag.TagName } else { return fmt.Sprintf("%s=%s", 
tag.TagName, tag.TagValue) } diff --git a/go/inst/tag_dao.go b/go/inst/tag_dao.go index 6a3387ce..a198142f 100644 --- a/go/inst/tag_dao.go +++ b/go/inst/tag_dao.go @@ -54,7 +54,7 @@ func Untag(instanceKey *InstanceKey, tag *Tag) (tagged *InstanceKeyMap, err erro if instanceKey == nil && !tag.HasValue { return nil, log.Errorf("Untag: either indicate an instance or a tag value. Will not delete on-valued tag across instances") } - clause := `` + var clause string args := sqlutils.Args() if tag.HasValue { clause = `tag_name=? and tag_value=?` diff --git a/go/kv/consul.go b/go/kv/consul.go index fa2f7f57..02b28146 100644 --- a/go/kv/consul.go +++ b/go/kv/consul.go @@ -40,8 +40,7 @@ func getConsulKVCacheKey(dc, key string) string { type consulStore struct { client *consulapi.Client kvCache *cache.Cache - pairsDistributionSuccessMutex sync.Mutex - distributionReentry int64 + distributionReentry int64 } // NewConsulStore creates a new consul store. It is possible that the client for this store is nil, @@ -63,7 +62,7 @@ func NewConsulStore() KVStore { // ConsulAclToken defaults to "" consulConfig.Token = config.Config.ConsulAclToken if client, err := consulapi.NewClient(consulConfig); err != nil { - log.Errore(err) + _ = log.Errore(err) } else { store.client = client } @@ -155,7 +154,7 @@ func (this *consulStore) DistributePairs(kvPairs [](*KVPair)) (err error) { } if _, e := this.client.KV().Put(consulPair, writeOptions); e != nil { - log.Errorf("consulStore.DistributePairs(): failed %s", kcCacheKey) + _ = log.Errorf("consulStore.DistributePairs(): failed %s", kcCacheKey) failed++ err = e } else { diff --git a/go/kv/consul_test.go b/go/kv/consul_test.go index 55cd0070..129818cb 100644 --- a/go/kv/consul_test.go +++ b/go/kv/consul_test.go @@ -3,7 +3,7 @@ package kv import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "reflect" @@ -34,7 +34,7 @@ func sortTxnKVOps(txnOps []*consulapi.TxnOp) []*consulapi.TxnOp { func buildConsulTestServer(t 
*testing.T, testOps []consulTestServerOp) *httptest.Server { handlerFunc := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestBytes, _ := ioutil.ReadAll(r.Body) + requestBytes, _ := io.ReadAll(r.Body) requestBody := strings.TrimSpace(string(requestBytes)) for _, testOp := range testOps { @@ -46,11 +46,11 @@ func buildConsulTestServer(t *testing.T, testOps []consulTestServerOp) *httptest } if r.URL.String() == "/v1/catalog/datacenters" { w.WriteHeader(testOp.ResponseCode) - json.NewEncoder(w).Encode(testOp.Response) + _ = json.NewEncoder(w).Encode(testOp.Response) return } else if strings.HasPrefix(r.URL.String(), "/v1/kv") && testOp.Response != nil { w.WriteHeader(testOp.ResponseCode) - json.NewEncoder(w).Encode(testOp.Response) + _ = json.NewEncoder(w).Encode(testOp.Response) return } else if strings.HasPrefix(r.URL.String(), "/v1/txn") { var txnOps consulapi.TxnOps @@ -62,13 +62,13 @@ func buildConsulTestServer(t *testing.T, testOps []consulTestServerOp) *httptest // https://github.com/hashicorp/consul/blob/87f6617eecd23a64add1e79eb3cd8dc3da9e649e/agent/txn_endpoint.go#L121-L129 if len(txnOps) > 64 { w.WriteHeader(http.StatusRequestEntityTooLarge) - fmt.Fprintf(w, "Transaction contains too many operations (%d > 64)", len(txnOps)) + _, _ = fmt.Fprintf(w, "Transaction contains too many operations (%d > 64)", len(txnOps)) return } testOpRequest := sortTxnKVOps(testOp.Request.(consulapi.TxnOps)) if testOp.Response != nil && reflect.DeepEqual(testOpRequest, sortTxnKVOps(txnOps)) { w.WriteHeader(testOp.ResponseCode) - json.NewEncoder(w).Encode(testOp.Response) + _ = json.NewEncoder(w).Encode(testOp.Response) return } } diff --git a/go/kv/consul_txn.go b/go/kv/consul_txn.go index 5cd77ec0..def57900 100644 --- a/go/kv/consul_txn.go +++ b/go/kv/consul_txn.go @@ -46,11 +46,7 @@ func groupKVPairsByKeyPrefix(kvPairs consulapi.KVPairs) (groups []consulapi.KVPa prefix = path[0] } } - if _, found := groupsMap[prefix]; found { - groupsMap[prefix] = 
append(groupsMap[prefix], pair) - } else { - groupsMap[prefix] = consulapi.KVPairs{pair} - } + groupsMap[prefix] = append(groupsMap[prefix], pair) } pairsBuf := consulapi.KVPairs{} @@ -73,8 +69,7 @@ func groupKVPairsByKeyPrefix(kvPairs consulapi.KVPairs) (groups []consulapi.KVPa type consulTxnStore struct { client *consulapi.Client kvCache *cache.Cache - pairsDistributionSuccessMutex sync.Mutex - distributionReentry int64 + distributionReentry int64 } // NewConsulTxnStore creates a new consul store that uses Consul Transactions to read/write multiple KVPairs. @@ -96,7 +91,7 @@ func NewConsulTxnStore() KVStore { // ConsulAclToken defaults to "" consulConfig.Token = config.Config.ConsulAclToken if client, err := consulapi.NewClient(consulConfig); err != nil { - log.Errore(err) + _ = log.Errore(err) } else { store.client = client } @@ -109,12 +104,12 @@ func NewConsulTxnStore() KVStore { func (this *consulTxnStore) doWriteTxn(txnOps consulapi.TxnOps, queryOptions *consulapi.QueryOptions) (err error) { ok, resp, _, err := this.client.Txn().Txn(txnOps, queryOptions) if err != nil { - log.Errorf("consulTxnStore.doWriteTxn(): %v", err) + _ = log.Errorf("consulTxnStore.doWriteTxn(): %v", err) return err } else if !ok { for _, terr := range resp.Errors { txnOp := txnOps[terr.OpIndex] - log.Errorf("consulTxnStore.doWriteTxn(): transaction error %q for KV %s on %s", terr.What, txnOp.KV.Verb, txnOp.KV.Key) + _ = log.Errorf("consulTxnStore.doWriteTxn(): transaction error %q for KV %s on %s", terr.What, txnOp.KV.Verb, txnOp.KV.Key) err = fmt.Errorf("%v", terr.What) } } @@ -166,7 +161,7 @@ func (this *consulTxnStore) updateDatacenterKVPairs(wg *sync.WaitGroup, dc strin if len(getTxnOps) > 0 { _, getTxnResp, _, terr = this.client.Txn().Txn(getTxnOps, queryOptions) if terr != nil { - log.Errorf("consulTxnStore.DistributePairs(): %v", terr) + _ = log.Errorf("consulTxnStore.DistributePairs(): %v", terr) } resp.getTxns++ } @@ -199,7 +194,7 @@ func (this *consulTxnStore) 
updateDatacenterKVPairs(wg *sync.WaitGroup, dc strin // update key-value pairs in a single Consul Transaction if len(setTxnOps) > 0 { if resp.err = this.doWriteTxn(setTxnOps, queryOptions); resp.err != nil { - log.Errorf("consulTxnStore.DistributePairs(): failed %v, error %v", kcCacheKeys, resp.err) + _ = log.Errorf("consulTxnStore.DistributePairs(): failed %v, error %v", kcCacheKeys, resp.err) resp.failed = len(setTxnOps) } else { for _, txnOp := range setTxnOps { diff --git a/go/kv/zk.go b/go/kv/zk.go index 85203918..94c12010 100644 --- a/go/kv/zk.go +++ b/go/kv/zk.go @@ -18,9 +18,7 @@ package kv import ( "fmt" - "math/rand" "strings" - "time" "github.com/outbrain/zookeepercli/go/zk" "github.com/proxysql/orchestrator/go/config" @@ -42,8 +40,6 @@ func NewZkStore() KVStore { store := &zkStore{} if config.Config.ZkAddress != "" { - rand.Seed(time.Now().UnixNano()) - serversArray := strings.Split(config.Config.ZkAddress, ",") zook := zk.NewZooKeeper() zook.SetServers(serversArray) @@ -52,41 +48,41 @@ func NewZkStore() KVStore { return store } -func (this *zkStore) PutKeyValue(key string, value string) (err error) { - if this.zook == nil { +func (s *zkStore) PutKeyValue(key string, value string) (err error) { + if s.zook == nil { return nil } - if _, err = this.zook.Set(normalizeKey(key), []byte(value)); err == zkconstants.ErrNoNode { + if _, err = s.zook.Set(normalizeKey(key), []byte(value)); err == zkconstants.ErrNoNode { aclstr := "" - _, err = this.zook.Create(normalizeKey(key), []byte(value), aclstr, true) + _, err = s.zook.Create(normalizeKey(key), []byte(value), aclstr, true) } return err } -func (this *zkStore) GetKeyValue(key string) (value string, found bool, err error) { - if this.zook == nil { +func (s *zkStore) GetKeyValue(key string) (value string, found bool, err error) { + if s.zook == nil { return value, false, nil } - result, err := this.zook.Get(normalizeKey(key)) + result, err := s.zook.Get(normalizeKey(key)) if err != nil { return value, false, 
err } return string(result), true, nil } -func (this *zkStore) PutKVPairs(kvPairs []*KVPair) (err error) { - if this.zook == nil { +func (s *zkStore) PutKVPairs(kvPairs []*KVPair) (err error) { + if s.zook == nil { return nil } for _, pair := range kvPairs { - if err := this.PutKeyValue(pair.Key, pair.Value); err != nil { + if err := s.PutKeyValue(pair.Key, pair.Value); err != nil { return err } } return nil } -func (this *zkStore) DistributePairs(kvPairs [](*KVPair)) (err error) { +func (s *zkStore) DistributePairs(kvPairs [](*KVPair)) (err error) { return nil } diff --git a/go/logic/command_applier.go b/go/logic/command_applier.go index 6a473262..684cfcac 100644 --- a/go/logic/command_applier.go +++ b/go/logic/command_applier.go @@ -114,7 +114,7 @@ func (applier *CommandApplier) injectedPseudoGTID(value []byte) interface{} { if err := json.Unmarshal(value, &clusterName); err != nil { return log.Errore(err) } - inst.RegisterInjectedPseudoGTID(clusterName) + _ = inst.RegisterInjectedPseudoGTID(clusterName) return nil } @@ -296,7 +296,7 @@ func (applier *CommandApplier) healthReport(value []byte) interface{} { if err := json.Unmarshal(value, &authenticationToken); err != nil { return log.Errore(err) } - orcraft.ReportToRaftLeader(authenticationToken) + _ = orcraft.ReportToRaftLeader(authenticationToken) return nil } diff --git a/go/logic/orchestrator.go b/go/logic/orchestrator.go index b360d7bd..acefc117 100644 --- a/go/logic/orchestrator.go +++ b/go/logic/orchestrator.go @@ -79,15 +79,15 @@ var kvFoundCache = cache.New(10*time.Minute, time.Minute) func init() { snapshotDiscoveryKeys = make(chan inst.InstanceKey, 10) - metrics.Register("discoveries.attempt", discoveriesCounter) - metrics.Register("discoveries.fail", failedDiscoveriesCounter) - metrics.Register("discoveries.instance_poll_seconds_exceeded", instancePollSecondsExceededCounter) - metrics.Register("discoveries.queue_length", discoveryQueueLengthGauge) - metrics.Register("discoveries.recent_count", 
discoveryRecentCountGauge) - metrics.Register("elect.is_elected", isElectedGauge) - metrics.Register("health.is_healthy", isHealthyGauge) - metrics.Register("raft.is_healthy", isRaftHealthyGauge) - metrics.Register("raft.is_leader", isRaftLeaderGauge) + _ = metrics.Register("discoveries.attempt", discoveriesCounter) + _ = metrics.Register("discoveries.fail", failedDiscoveriesCounter) + _ = metrics.Register("discoveries.instance_poll_seconds_exceeded", instancePollSecondsExceededCounter) + _ = metrics.Register("discoveries.queue_length", discoveryQueueLengthGauge) + _ = metrics.Register("discoveries.recent_count", discoveryRecentCountGauge) + _ = metrics.Register("elect.is_elected", isElectedGauge) + _ = metrics.Register("health.is_healthy", isHealthyGauge) + _ = metrics.Register("raft.is_healthy", isRaftHealthyGauge) + _ = metrics.Register("raft.is_leader", isRaftLeaderGauge) ometrics.OnMetricsTick(func() { discoveryQueueLengthGauge.Update(int64(discoveryQueue.QueueLen())) @@ -146,14 +146,14 @@ func acceptSignals() { switch sig { case syscall.SIGHUP: log.Infof("Received SIGHUP. Reloading configuration") - inst.AuditOperation("reload-configuration", nil, "Triggered via SIGHUP") + _ = inst.AuditOperation("reload-configuration", nil, "Triggered via SIGHUP") config.Reload() discoveryMetrics.SetExpirePeriod(time.Duration(config.Config.DiscoveryCollectionRetentionSeconds) * time.Second) case syscall.SIGTERM: log.Infof("Received SIGTERM. Shutting down orchestrator") discoveryMetrics.StopAutoExpiration() // probably should poke other go routines to stop cleanly here ... 
- inst.AuditOperation("shutdown", nil, "Triggered via SIGTERM") + _ = inst.AuditOperation("shutdown", nil, "Triggered via SIGTERM") os.Exit(0) } } @@ -189,7 +189,7 @@ func handleDiscoveryRequests() { deadInstancesDiscoveryQueue = discovery.CreateOrReturnQueue("DEADINSTANCES") // Register dead instances queue gauge only if the queue exists - metrics.Register("discoveries.dead_instances_queue_length", deadInstancesDiscoveryQueueLengthGauge) + _ = metrics.Register("discoveries.dead_instances_queue_length", deadInstancesDiscoveryQueueLengthGauge) ometrics.OnMetricsTick(func() { deadInstancesDiscoveryQueueLengthGauge.Update(int64(deadInstancesDiscoveryQueue.QueueLen())) }) @@ -233,7 +233,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { // create stopwatch entries latency := stopwatch.NewNamedStopwatch() - latency.AddMany([]string{ + _ = latency.AddMany([]string{ "backend", "instance", "total"}) @@ -244,11 +244,11 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { discoveryTime := latency.Elapsed("total") if discoveryTime > instancePollSecondsDuration() { instancePollSecondsExceededCounter.Inc(1) - log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", instanceKey, discoveryTime.Seconds()) + _ = log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", instanceKey, discoveryTime.Seconds()) } }() - instanceKey.ResolveHostname() + _, _ = instanceKey.ResolveHostname() if !instanceKey.IsValid() { return } @@ -289,7 +289,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { if instance == nil { failedDiscoveriesCounter.Inc(1) - discoveryMetrics.Append(&discovery.Metric{ + _ = discoveryMetrics.Append(&discovery.Metric{ Timestamp: time.Now(), InstanceKey: instanceKey, TotalLatency: totalLatency, @@ -298,7 +298,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { Err: err, }) if util.ClearToLog("discoverInstance", instanceKey.StringCode()) { - log.Warningf("DiscoverInstance(%+v) instance is nil in 
%.3fs (Backend: %.3fs, Instance: %.3fs), error=%+v", + _ = log.Warningf("DiscoverInstance(%+v) instance is nil in %.3fs (Backend: %.3fs, Instance: %.3fs), error=%+v", instanceKey, totalLatency.Seconds(), backendLatency.Seconds(), @@ -308,7 +308,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey) { return } - discoveryMetrics.Append(&discovery.Metric{ + _ = discoveryMetrics.Append(&discovery.Metric{ Timestamp: time.Now(), InstanceKey: instanceKey, TotalLatency: totalLatency, @@ -374,17 +374,17 @@ func onHealthTick() { atomic.StoreInt64(&isElectedNode, 0) } if process.SinceLastGoodHealthCheck() > yieldAfterUnhealthyDuration { - log.Errorf("Health test is failing for over %+v seconds. raft yielding", yieldAfterUnhealthyDuration.Seconds()) - orcraft.Yield() + _ = log.Errorf("Health test is failing for over %+v seconds. raft yielding", yieldAfterUnhealthyDuration.Seconds()) + _ = orcraft.Yield() } if process.SinceLastGoodHealthCheck() > fatalAfterUnhealthyDuration { - orcraft.FatalRaftError(fmt.Errorf("Node is unable to register health. Please check database connnectivity and/or time synchronisation.")) + _ = orcraft.FatalRaftError(fmt.Errorf("node is unable to register health, please check database connectivity and/or time synchronization")) } } if !orcraft.IsRaftEnabled() { myIsElectedNode, err := process.AttemptElection() if err != nil { - log.Errore(err) + _ = log.Errore(err) } if myIsElectedNode { atomic.StoreInt64(&isElectedNode, 1) @@ -404,13 +404,13 @@ func onHealthTick() { } instanceKeys, err := inst.ReadOutdatedInstanceKeys() if err != nil { - log.Errore(err) + _ = log.Errore(err) } if !wasAlreadyElected { // Just turned to be leader! 
- go process.RegisterNode(process.ThisNodeHealth) - go inst.ExpireMaintenance() + go func() { _, _ = process.RegisterNode(process.ThisNodeHealth) }() + go func() { _ = inst.ExpireMaintenance() }() } func() { @@ -454,7 +454,7 @@ func publishDiscoverMasters() error { if err == nil { for _, instance := range instances { key := instance.Key - go orcraft.PublishCommand("discover", key) + go func() { _, _ = orcraft.PublishCommand("discover", key) }() } } return log.Errore(err) @@ -477,10 +477,10 @@ func InjectPseudoGTIDOnWriters() error { // OK to be cached for a while. if _, found := pseudoGTIDPublishCache.Get(clusterName); !found { pseudoGTIDPublishCache.Set(clusterName, true, cache.DefaultExpiration) - orcraft.PublishCommand("injected-pseudo-gtid", clusterName) + _, _ = orcraft.PublishCommand("injected-pseudo-gtid", clusterName) } } else { - inst.RegisterInjectedPseudoGTID(clusterName) + _ = inst.RegisterInjectedPseudoGTID(clusterName) } } }() @@ -535,7 +535,7 @@ func SubmitMastersToKvStores(clusterName string, force bool) (kvPairs [](*kv.KVP } } if err := kv.DistributePairs(kvPairs); err != nil { - log.Errore(err) + _ = log.Errore(err) } return kvPairs, submittedCount, log.Errore(selectedError) } @@ -545,9 +545,9 @@ func injectSeeds(seedOnce *sync.Once) { for _, seed := range config.Config.DiscoverySeeds { instanceKey, err := inst.ParseRawInstanceKey(seed) if err == nil { - inst.InjectSeed(instanceKey) + _ = inst.InjectSeed(instanceKey) } else { - log.Errorf("Error parsing seed %s: %+v", seed, err) + _ = log.Errorf("Error parsing seed %s: %+v", seed, err) } } }) @@ -562,7 +562,7 @@ func ContinuousDiscovery() { checkAndRecoverWaitPeriod := 3 * instancePollSecondsDuration() recentDiscoveryOperationKeys = cache.New(instancePollSecondsDuration(), time.Second) - inst.LoadHostnameResolveCache() + _ = inst.LoadHostnameResolveCache() go handleDiscoveryRequests() healthTick := time.Tick(config.HealthPollSeconds * time.Second) @@ -583,20 +583,20 @@ func ContinuousDiscovery() { 
var seedOnce sync.Once - go ometrics.InitMetrics() - go ometrics.InitGraphiteMetrics() + go func() { _ = ometrics.InitMetrics() }() + go func() { _ = ometrics.InitGraphiteMetrics() }() go acceptSignals() go kv.InitKVStores() go proxysql.InitHook() if config.Config.RaftEnabled { if err := orcraft.Setup(NewCommandApplier(), NewSnapshotDataCreatorApplier(), process.ThisHostname); err != nil { - log.Fatale(err) + _ = log.Fatale(err) } go orcraft.Monitor() } if *config.RuntimeCLIFlags.GrabElection { - process.GrabElection() + _ = process.GrabElection() } log.Infof("continuous discovery: starting") @@ -612,54 +612,54 @@ func ContinuousDiscovery() { // But rather should invoke such routinely operations that need to be as (or roughly as) frequent // as instance poll if IsLeaderOrActive() { - go inst.UpdateClusterAliases() - go inst.ExpireDowntime() + go func() { _ = inst.UpdateClusterAliases() }() + go func() { _ = inst.ExpireDowntime() }() go injectSeeds(&seedOnce) } }() case <-autoPseudoGTIDTick: go func() { if config.Config.AutoPseudoGTID && IsLeader() { - go InjectPseudoGTIDOnWriters() + go func() { _ = InjectPseudoGTIDOnWriters() }() } }() case <-caretakingTick: // Various periodic internal maintenance tasks go func() { if IsLeaderOrActive() { - go inst.RecordInstanceCoordinatesHistory() - go inst.ReviewUnseenInstances() - go inst.InjectUnseenMasters() - - go inst.ForgetLongUnseenInstances() - go inst.ForgetLongUnseenClusterAliases() - go inst.ForgetUnseenInstancesDifferentlyResolved() - go inst.ForgetExpiredHostnameResolves() - go inst.DeleteInvalidHostnameResolves() - go inst.ResolveUnknownMasterHostnameResolves() - go inst.ExpireMaintenance() - go inst.ExpireCandidateInstances() - go inst.ExpireHostnameUnresolve() - go inst.ExpireClusterDomainName() - go inst.ExpireAudit() - go inst.ExpireMasterPositionEquivalence() - go inst.ExpirePoolInstances() - go inst.FlushNontrivialResolveCacheToDatabase() - go inst.ExpireInjectedPseudoGTID() - go 
inst.ExpireStaleInstanceBinlogCoordinates() - go process.ExpireNodesHistory() - go process.ExpireAccessTokens() + go func() { _ = inst.RecordInstanceCoordinatesHistory() }() + go func() { _ = inst.ReviewUnseenInstances() }() + go func() { _ = inst.InjectUnseenMasters() }() + + go func() { _ = inst.ForgetLongUnseenInstances() }() + go func() { _ = inst.ForgetLongUnseenClusterAliases() }() + go func() { _ = inst.ForgetUnseenInstancesDifferentlyResolved() }() + go func() { _ = inst.ForgetExpiredHostnameResolves() }() + go func() { _ = inst.DeleteInvalidHostnameResolves() }() + go func() { _ = inst.ResolveUnknownMasterHostnameResolves() }() + go func() { _ = inst.ExpireMaintenance() }() + go func() { _ = inst.ExpireCandidateInstances() }() + go func() { _ = inst.ExpireHostnameUnresolve() }() + go func() { _ = inst.ExpireClusterDomainName() }() + go func() { _ = inst.ExpireAudit() }() + go func() { _ = inst.ExpireMasterPositionEquivalence() }() + go func() { _ = inst.ExpirePoolInstances() }() + go func() { _ = inst.FlushNontrivialResolveCacheToDatabase() }() + go func() { _ = inst.ExpireInjectedPseudoGTID() }() + go func() { _ = inst.ExpireStaleInstanceBinlogCoordinates() }() + go func() { _ = process.ExpireNodesHistory() }() + go func() { _ = process.ExpireAccessTokens() }() go process.ExpireAvailableNodes() - go ExpireFailureDetectionHistory() - go ExpireTopologyRecoveryHistory() - go ExpireTopologyRecoveryStepsHistory() + go func() { _ = ExpireFailureDetectionHistory() }() + go func() { _ = ExpireTopologyRecoveryHistory() }() + go func() { _ = ExpireTopologyRecoveryStepsHistory() }() if runCheckAndRecoverOperationsTimeRipe() && IsLeader() { go SubmitMastersToKvStores("", false) } } else { // Take this opportunity to refresh yourself - go inst.LoadHostnameResolveCache() + go func() { _ = inst.LoadHostnameResolveCache() }() } }() case <-raftCaretakingTick: @@ -673,7 +673,7 @@ func ContinuousDiscovery() { go ClearActiveRecoveries() go ExpireBlockedRecoveries() go 
AcknowledgeCrashedRecoveries() - go inst.ExpireInstanceAnalysisChangelog() + go func() { _ = inst.ExpireInstanceAnalysisChangelog() }() go func() { // This function is non re-entrant (it can only be running once at any point in time) @@ -693,7 +693,7 @@ func ContinuousDiscovery() { case <-snapshotTopologiesTick: go func() { if IsLeaderOrActive() { - go inst.SnapshotTopologies() + go func() { _ = inst.SnapshotTopologies() }() } }() } @@ -702,7 +702,7 @@ func ContinuousDiscovery() { func pollAgent(hostname string) error { polledAgent, err := agent.GetAgent(hostname) - agent.UpdateAgentLastChecked(hostname) + _ = agent.UpdateAgentLastChecked(hostname) if err != nil { return log.Errore(err) @@ -735,8 +735,8 @@ func ContinuousAgentsPoll() { // See if we should also forget agents (lower frequency) select { case <-caretakingTick: - agent.ForgetLongUnseenAgents() - agent.FailStaleSeeds() + _ = agent.ForgetLongUnseenAgents() + _ = agent.FailStaleSeeds() default: } } @@ -745,6 +745,6 @@ func ContinuousAgentsPoll() { func discoverSeededAgents() { for seededAgent := range agent.SeededAgents { instanceKey := &inst.InstanceKey{Hostname: seededAgent.Hostname, Port: int(seededAgent.MySQLPort)} - go inst.ReadTopologyInstance(instanceKey) + go func() { _, _ = inst.ReadTopologyInstance(instanceKey) }() } } diff --git a/go/logic/snapshot_data.go b/go/logic/snapshot_data.go index de50b0a6..bd6e9cd0 100644 --- a/go/logic/snapshot_data.go +++ b/go/logic/snapshot_data.go @@ -115,7 +115,7 @@ func NewSnapshotDataCreatorApplier() *SnapshotDataCreatorApplier { return generator } -func (this *SnapshotDataCreatorApplier) GetData() (data []byte, err error) { +func (s *SnapshotDataCreatorApplier) GetData() (data []byte, err error) { snapshotData := CreateSnapshotData() b, err := json.Marshal(snapshotData) if err != nil { @@ -132,7 +132,7 @@ func (this *SnapshotDataCreatorApplier) GetData() (data []byte, err error) { return buf.Bytes(), nil } -func (this *SnapshotDataCreatorApplier) Restore(rc 
io.ReadCloser) error { +func (s *SnapshotDataCreatorApplier) Restore(rc io.ReadCloser) error { snapshotData := NewSnapshotData() zr, err := gzip.NewReader(rc) if err != nil { @@ -156,7 +156,7 @@ func (this *SnapshotDataCreatorApplier) Restore(rc io.ReadCloser) error { existingKeys, _ := inst.ReadAllInstanceKeys() for _, existingKey := range existingKeys { if !snapshotInstanceKeyMap.HasKey(existingKey) { - inst.ForgetInstance(&existingKey) + _ = inst.ForgetInstance(&existingKey) discardedKeys++ } } @@ -174,7 +174,7 @@ func (this *SnapshotDataCreatorApplier) Restore(rc io.ReadCloser) error { if err := inst.WriteInstance(minimalInstance.ToInstance(), false, nil); err == nil { discoveredKeys++ } else { - log.Errore(err) + _ = log.Errore(err) } } } diff --git a/go/logic/topology_recovery.go b/go/logic/topology_recovery.go index dd0a2517..50776a9d 100644 --- a/go/logic/topology_recovery.go +++ b/go/logic/topology_recovery.go @@ -48,9 +48,9 @@ type RecoveryType string const ( MasterRecovery RecoveryType = "MasterRecovery" - CoMasterRecovery = "CoMasterRecovery" - IntermediateMasterRecovery = "IntermediateMasterRecovery" - ReplicationGroupMemberRecovery = "ReplicationGroupMemberRecovery" + CoMasterRecovery RecoveryType = "CoMasterRecovery" + IntermediateMasterRecovery RecoveryType = "IntermediateMasterRecovery" + ReplicationGroupMemberRecovery RecoveryType = "ReplicationGroupMemberRecovery" ) type RecoveryAcknowledgement struct { @@ -141,7 +141,7 @@ func (this *TopologyRecovery) AddError(err error) error { func (this *TopologyRecovery) AddErrors(errs []error) { for _, err := range errs { - this.AddError(err) + _ = this.AddError(err) } } @@ -163,9 +163,9 @@ type MasterRecoveryType string const ( NotMasterRecovery MasterRecoveryType = "NotMasterRecovery" - MasterRecoveryGTID = "MasterRecoveryGTID" - MasterRecoveryPseudoGTID = "MasterRecoveryPseudoGTID" - MasterRecoveryBinlogServer = "MasterRecoveryBinlogServer" + MasterRecoveryGTID MasterRecoveryType = "MasterRecoveryGTID" + 
MasterRecoveryPseudoGTID MasterRecoveryType = "MasterRecoveryPseudoGTID" + MasterRecoveryBinlogServer MasterRecoveryType = "MasterRecoveryBinlogServer" ) var emergencyReadTopologyInstanceMap *cache.Cache @@ -200,15 +200,15 @@ var recoverDeadReplicationGroupMemberFailureCounter = metrics.NewCounter() var countPendingRecoveriesGauge = metrics.NewGauge() func init() { - metrics.Register("recover.dead_master.start", recoverDeadMasterCounter) - metrics.Register("recover.dead_master.success", recoverDeadMasterSuccessCounter) - metrics.Register("recover.dead_master.fail", recoverDeadMasterFailureCounter) - metrics.Register("recover.dead_intermediate_master.start", recoverDeadIntermediateMasterCounter) - metrics.Register("recover.dead_intermediate_master.success", recoverDeadIntermediateMasterSuccessCounter) - metrics.Register("recover.dead_intermediate_master.fail", recoverDeadIntermediateMasterFailureCounter) - metrics.Register("recover.dead_co_master.start", recoverDeadCoMasterCounter) - metrics.Register("recover.dead_co_master.success", recoverDeadCoMasterSuccessCounter) - metrics.Register("recover.dead_co_master.fail", recoverDeadCoMasterFailureCounter) + _ = metrics.Register("recover.dead_master.start", recoverDeadMasterCounter) + _ = metrics.Register("recover.dead_master.success", recoverDeadMasterSuccessCounter) + _ = metrics.Register("recover.dead_master.fail", recoverDeadMasterFailureCounter) + _ = metrics.Register("recover.dead_intermediate_master.start", recoverDeadIntermediateMasterCounter) + _ = metrics.Register("recover.dead_intermediate_master.success", recoverDeadIntermediateMasterSuccessCounter) + _ = metrics.Register("recover.dead_intermediate_master.fail", recoverDeadIntermediateMasterFailureCounter) + _ = metrics.Register("recover.dead_co_master.start", recoverDeadCoMasterCounter) + _ = metrics.Register("recover.dead_co_master.success", recoverDeadCoMasterSuccessCounter) + _ = metrics.Register("recover.dead_co_master.fail", 
recoverDeadCoMasterFailureCounter) metrics.Register("recover.dead_replication_group_member.start", recoverDeadReplicationGroupMemberCounter) metrics.Register("recover.dead_replication_group_member.success", recoverDeadReplicationGroupMemberSuccessCounter) metrics.Register("recover.dead_replication_group_member.fail", recoverDeadReplicationGroupMemberFailureCounter) @@ -273,44 +273,44 @@ func prepareCommand(command string, topologyRecovery *TopologyRecovery) (result command = strings.TrimRight(command, "&") async = true } - command = strings.Replace(command, "{failureType}", string(analysisEntry.Analysis), -1) - command = strings.Replace(command, "{instanceType}", string(analysisEntry.GetAnalysisInstanceType()), -1) - command = strings.Replace(command, "{isMaster}", fmt.Sprintf("%t", analysisEntry.IsMaster), -1) - command = strings.Replace(command, "{isCoMaster}", fmt.Sprintf("%t", analysisEntry.IsCoMaster), -1) - command = strings.Replace(command, "{failureDescription}", analysisEntry.Description, -1) - command = strings.Replace(command, "{command}", analysisEntry.CommandHint, -1) - command = strings.Replace(command, "{failedHost}", analysisEntry.AnalyzedInstanceKey.Hostname, -1) - command = strings.Replace(command, "{failedPort}", fmt.Sprintf("%d", analysisEntry.AnalyzedInstanceKey.Port), -1) - command = strings.Replace(command, "{failureCluster}", analysisEntry.ClusterDetails.ClusterName, -1) - command = strings.Replace(command, "{failureClusterAlias}", analysisEntry.ClusterDetails.ClusterAlias, -1) - command = strings.Replace(command, "{failureClusterDomain}", analysisEntry.ClusterDetails.ClusterDomain, -1) - command = strings.Replace(command, "{countSlaves}", fmt.Sprintf("%d", analysisEntry.CountReplicas), -1) - command = strings.Replace(command, "{countReplicas}", fmt.Sprintf("%d", analysisEntry.CountReplicas), -1) - command = strings.Replace(command, "{isDowntimed}", fmt.Sprint(analysisEntry.IsDowntimed), -1) - command = strings.Replace(command, 
"{autoMasterRecovery}", fmt.Sprint(analysisEntry.ClusterDetails.HasAutomatedMasterRecovery), -1) - command = strings.Replace(command, "{autoIntermediateMasterRecovery}", fmt.Sprint(analysisEntry.ClusterDetails.HasAutomatedIntermediateMasterRecovery), -1) - command = strings.Replace(command, "{orchestratorHost}", process.ThisHostname, -1) - command = strings.Replace(command, "{recoveryUID}", topologyRecovery.UID, -1) - - command = strings.Replace(command, "{isSuccessful}", fmt.Sprint(topologyRecovery.SuccessorKey != nil), -1) + command = strings.ReplaceAll(command, "{failureType}", string(analysisEntry.Analysis)) + command = strings.ReplaceAll(command, "{instanceType}", string(analysisEntry.GetAnalysisInstanceType())) + command = strings.ReplaceAll(command, "{isMaster}", fmt.Sprintf("%t", analysisEntry.IsMaster)) + command = strings.ReplaceAll(command, "{isCoMaster}", fmt.Sprintf("%t", analysisEntry.IsCoMaster)) + command = strings.ReplaceAll(command, "{failureDescription}", analysisEntry.Description) + command = strings.ReplaceAll(command, "{command}", analysisEntry.CommandHint) + command = strings.ReplaceAll(command, "{failedHost}", analysisEntry.AnalyzedInstanceKey.Hostname) + command = strings.ReplaceAll(command, "{failedPort}", fmt.Sprintf("%d", analysisEntry.AnalyzedInstanceKey.Port)) + command = strings.ReplaceAll(command, "{failureCluster}", analysisEntry.ClusterDetails.ClusterName) + command = strings.ReplaceAll(command, "{failureClusterAlias}", analysisEntry.ClusterDetails.ClusterAlias) + command = strings.ReplaceAll(command, "{failureClusterDomain}", analysisEntry.ClusterDetails.ClusterDomain) + command = strings.ReplaceAll(command, "{countSlaves}", fmt.Sprintf("%d", analysisEntry.CountReplicas)) + command = strings.ReplaceAll(command, "{countReplicas}", fmt.Sprintf("%d", analysisEntry.CountReplicas)) + command = strings.ReplaceAll(command, "{isDowntimed}", fmt.Sprint(analysisEntry.IsDowntimed)) + command = strings.ReplaceAll(command, 
"{autoMasterRecovery}", fmt.Sprint(analysisEntry.ClusterDetails.HasAutomatedMasterRecovery)) + command = strings.ReplaceAll(command, "{autoIntermediateMasterRecovery}", fmt.Sprint(analysisEntry.ClusterDetails.HasAutomatedIntermediateMasterRecovery)) + command = strings.ReplaceAll(command, "{orchestratorHost}", process.ThisHostname) + command = strings.ReplaceAll(command, "{recoveryUID}", topologyRecovery.UID) + + command = strings.ReplaceAll(command, "{isSuccessful}", fmt.Sprint(topologyRecovery.SuccessorKey != nil)) if topologyRecovery.SuccessorKey != nil { - command = strings.Replace(command, "{successorHost}", topologyRecovery.SuccessorKey.Hostname, -1) - command = strings.Replace(command, "{successorPort}", fmt.Sprintf("%d", topologyRecovery.SuccessorKey.Port), -1) + command = strings.ReplaceAll(command, "{successorHost}", topologyRecovery.SuccessorKey.Hostname) + command = strings.ReplaceAll(command, "{successorPort}", fmt.Sprintf("%d", topologyRecovery.SuccessorKey.Port)) // As long as SuccessorBinlogCoordinates != nil, we replace {successorBinlogCoordinates} // Format of the display string of binlog coordinates would be LogFile:LogPositon if topologyRecovery.SuccessorBinlogCoordinates != nil { - command = strings.Replace(command, "{successorBinlogCoordinates}", topologyRecovery.SuccessorBinlogCoordinates.DisplayString(), -1) + command = strings.ReplaceAll(command, "{successorBinlogCoordinates}", topologyRecovery.SuccessorBinlogCoordinates.DisplayString()) } // As long as SucesssorKey != nil, we replace {successorAlias}. // If SucessorAlias is "", it's fine. We'll replace {successorAlias} with "". 
- command = strings.Replace(command, "{successorAlias}", topologyRecovery.SuccessorAlias, -1) + command = strings.ReplaceAll(command, "{successorAlias}", topologyRecovery.SuccessorAlias) } - command = strings.Replace(command, "{lostSlaves}", topologyRecovery.LostReplicas.ToCommaDelimitedList(), -1) - command = strings.Replace(command, "{lostReplicas}", topologyRecovery.LostReplicas.ToCommaDelimitedList(), -1) - command = strings.Replace(command, "{countLostReplicas}", fmt.Sprintf("%d", len(topologyRecovery.LostReplicas)), -1) - command = strings.Replace(command, "{slaveHosts}", analysisEntry.Replicas.ToCommaDelimitedList(), -1) - command = strings.Replace(command, "{replicaHosts}", analysisEntry.Replicas.ToCommaDelimitedList(), -1) + command = strings.ReplaceAll(command, "{lostSlaves}", topologyRecovery.LostReplicas.ToCommaDelimitedList()) + command = strings.ReplaceAll(command, "{lostReplicas}", topologyRecovery.LostReplicas.ToCommaDelimitedList()) + command = strings.ReplaceAll(command, "{countLostReplicas}", fmt.Sprintf("%d", len(topologyRecovery.LostReplicas))) + command = strings.ReplaceAll(command, "{slaveHosts}", analysisEntry.Replicas.ToCommaDelimitedList()) + command = strings.ReplaceAll(command, "{replicaHosts}", analysisEntry.Replicas.ToCommaDelimitedList()) return command, async } @@ -365,7 +365,7 @@ func executeProcess(command string, env []string, topologyRecovery *TopologyReco info = fmt.Sprintf("Completed %s in %v", fullDescription, time.Since(start)) } else { info = fmt.Sprintf("Execution of %s failed in %v with error: %v", fullDescription, time.Since(start), err) - log.Errorf("%s", info) + _ = log.Errorf("%s", info) } AuditTopologyRecovery(topologyRecovery, info) return err @@ -520,14 +520,14 @@ func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey var cannotReplicateReplicas [](*inst.Instance) postponedAll := false - inst.AuditOperation("recover-dead-master", failedInstanceKey, "problem found; will recover") + _ = 
inst.AuditOperation("recover-dead-master", failedInstanceKey, "problem found; will recover") if !skipProcesses { if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil { return false, nil, lostReplicas, topologyRecovery.AddError(err) } } if err := proxysql.GetHook().PreFailover(failedInstanceKey.Hostname, failedInstanceKey.Port); err != nil { - log.Errorf("ProxySQL pre-failover failed (non-blocking): %v", err) + _ = log.Errorf("ProxySQL pre-failover failed (non-blocking): %v", err) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("ProxySQL pre-failover failed: %v", err)) } @@ -558,21 +558,21 @@ func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey switch topologyRecovery.RecoveryType { case MasterRecoveryGTID: { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: regrouping replicas via GTID")) + AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: regrouping replicas via GTID") lostReplicas, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasGTID(failedInstanceKey, true, false, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal) } case MasterRecoveryPseudoGTID: { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: regrouping replicas via Pseudo-GTID")) + AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: regrouping replicas via Pseudo-GTID") lostReplicas, _, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal) } case MasterRecoveryBinlogServer: { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: recovering via binlog servers")) + AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: recovering via binlog servers") promotedReplica, err = 
recoverDeadMasterInBinlogServerTopology(topologyRecovery) } } - topologyRecovery.AddError(err) + _ = topologyRecovery.AddError(err) lostReplicas = append(lostReplicas, cannotReplicateReplicas...) for _, replica := range lostReplicas { AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: - lost replica: %+v", replica.Key)) @@ -583,7 +583,7 @@ func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: lost %+v replicas during recovery process; detaching them", len(lostReplicas))) for _, replica := range lostReplicas { replica := replica - inst.DetachReplicaMasterHost(&replica.Key) + _, _ = inst.DetachReplicaMasterHost(&replica.Key) } return nil } @@ -591,30 +591,30 @@ func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey } func() error { - inst.BeginDowntime(inst.NewDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) + _ = inst.BeginDowntime(inst.NewDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) acknowledgeInstanceFailureDetection(&analysisEntry.AnalyzedInstanceKey) for _, replica := range lostReplicas { replica := replica - inst.BeginDowntime(inst.NewDowntime(&replica.Key, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) + _ = inst.BeginDowntime(inst.NewDowntime(&replica.Key, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) } return nil }() - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) + AuditTopologyRecovery(topologyRecovery, 
fmt.Sprintf("RecoverDeadMaster: %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) if promotedReplica != nil && !postponedAll { promotedReplica, err = replacePromotedReplicaWithCandidate(topologyRecovery, &analysisEntry.AnalyzedInstanceKey, promotedReplica, candidateInstanceKey) - topologyRecovery.AddError(err) + _ = topologyRecovery.AddError(err) } if promotedReplica == nil { message := "Failure: no replica promoted." AuditTopologyRecovery(topologyRecovery, message) - inst.AuditOperation("recover-dead-master", failedInstanceKey, message) + _ = inst.AuditOperation("recover-dead-master", failedInstanceKey, message) } else { message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) AuditTopologyRecovery(topologyRecovery, message) - inst.AuditOperation("recover-dead-master", failedInstanceKey, message) + _ = inst.AuditOperation("recover-dead-master", failedInstanceKey, message) } return true, promotedReplica, lostReplicas, err } @@ -649,9 +649,9 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de // Maybe we promoted a "prefer_not" // Maybe we promoted a server in a different DC than the master // There's many options. We may wish to replace the server we promoted with a better one. 
- AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("checking if should replace promoted replica with a better candidate")) + AuditTopologyRecovery(topologyRecovery, "checking if should replace promoted replica with a better candidate") if candidateInstanceKey == nil { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ checking if promoted replica is the ideal candidate")) + AuditTopologyRecovery(topologyRecovery, "+ checking if promoted replica is the ideal candidate") if deadInstance != nil { for _, candidateReplica := range candidateReplicas { if promotedReplica.Key.Equals(&candidateReplica.Key) && @@ -667,7 +667,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de // We didn't pick the ideal candidate; let's see if we can replace with a candidate from same DC and ENV if candidateInstanceKey == nil { // Try a candidate replica that is in same DC & env as the dead instance - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for an ideal candidate")) + AuditTopologyRecovery(topologyRecovery, "+ searching for an ideal candidate") if deadInstance != nil { for _, candidateReplica := range candidateReplicas { if canTakeOverPromotedServerAsMaster(candidateReplica, promotedReplica) && @@ -682,7 +682,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de } if candidateInstanceKey == nil { // We cannot find a candidate in same DC and ENV as dead master - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ checking if promoted replica is an OK candidate")) + AuditTopologyRecovery(topologyRecovery, "+ checking if promoted replica is an OK candidate") for _, candidateReplica := range candidateReplicas { if promotedReplica.Key.Equals(&candidateReplica.Key) { // Seems like we promoted a candidate replica (though not in same DC and ENV as dead master) @@ -699,7 +699,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de // Still nothing? 
if candidateInstanceKey == nil { // Try a candidate replica that is in same DC & env as the promoted replica (our promoted replica is not an "is_candidate") - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a candidate")) + AuditTopologyRecovery(topologyRecovery, "+ searching for a candidate") for _, candidateReplica := range candidateReplicas { if canTakeOverPromotedServerAsMaster(candidateReplica, promotedReplica) && promotedReplica.DataCenter == candidateReplica.DataCenter && @@ -713,7 +713,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de // Still nothing? if candidateInstanceKey == nil { // Try a candidate replica (our promoted replica is not an "is_candidate") - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a candidate")) + AuditTopologyRecovery(topologyRecovery, "+ searching for a candidate") for _, candidateReplica := range candidateReplicas { if canTakeOverPromotedServerAsMaster(candidateReplica, promotedReplica) { if satisfied, reason := MasterFailoverGeographicConstraintSatisfied(&topologyRecovery.AnalysisEntry, candidateReplica); satisfied { @@ -740,7 +740,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de if candidateInstanceKey == nil { // Still nothing? Then we didn't find a replica marked as "candidate". 
OK, further down the stream we have: // find neutral instance in same dv&env as dead master - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a neutral server to replace promoted server, in same DC and env as dead master")) + AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace promoted server, in same DC and env as dead master") for _, neutralReplica := range neutralReplicas { if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) && deadInstance.DataCenter == neutralReplica.DataCenter && @@ -752,7 +752,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de } if candidateInstanceKey == nil { // find neutral instance in same dv&env as promoted replica - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a neutral server to replace promoted server, in same DC and env as promoted replica")) + AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace promoted server, in same DC and env as promoted replica") for _, neutralReplica := range neutralReplicas { if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) && promotedReplica.DataCenter == neutralReplica.DataCenter && @@ -763,7 +763,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de } } if candidateInstanceKey == nil { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a neutral server to replace a prefer_not")) + AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace a prefer_not") for _, neutralReplica := range neutralReplicas { if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) { if satisfied, reason := MasterFailoverGeographicConstraintSatisfied(&topologyRecovery.AnalysisEntry, neutralReplica); satisfied { @@ -781,12 +781,12 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de // So do we have a candidate? 
if candidateInstanceKey == nil { // Found nothing. Stick with promoted replica - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ found no server to promote on top promoted replica")) + AuditTopologyRecovery(topologyRecovery, "+ found no server to promote on top promoted replica") return promotedReplica, false, nil } if promotedReplica.Key.Equals(candidateInstanceKey) { // Sanity. It IS the candidate, nothing to promote... - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ sanity check: found our very own server to promote; doing nothing")) + AuditTopologyRecovery(topologyRecovery, "+ sanity check: found our very own server to promote; doing nothing") return promotedReplica, false, nil } replacement, _, err = inst.ReadInstance(candidateInstanceKey) @@ -823,7 +823,7 @@ func replacePromotedReplicaWithCandidate(topologyRecovery *TopologyRecovery, dea relocateReplicasFunc := func() error { log.Debugf("replace-promoted-replica-with-candidate: relocating replicas of %+v below %+v", promotedReplica.Key, candidateInstance.Key) - relocatedReplicas, _, err, _ := inst.RelocateReplicas(&promotedReplica.Key, &candidateInstance.Key, "") + relocatedReplicas, _, _, err := inst.RelocateReplicas(&promotedReplica.Key, &candidateInstance.Key, "") log.Debugf("replace-promoted-replica-with-candidate: + relocated %+v replicas of %+v below %+v", len(relocatedReplicas), promotedReplica.Key, candidateInstance.Key) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("relocated %+v replicas of %+v below %+v", len(relocatedReplicas), promotedReplica.Key, candidateInstance.Key)) return log.Errore(err) @@ -845,7 +845,7 @@ func replacePromotedReplicaWithCandidate(topologyRecovery *TopologyRecovery, dea // checkAndRecoverDeadMaster checks a given analysis, decides whether to take action, and possibly takes action // Returns true when action was taken. 
func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedMasterRecovery) { + if !forceInstanceRecovery && !analysisEntry.ClusterDetails.HasAutomatedMasterRecovery { return false, nil, nil } topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery) @@ -910,7 +910,7 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate if config.Config.ApplyMySQLPromotionAfterMasterFailover || analysisEntry.CommandHint == inst.GracefulMasterTakeoverCommandHint { // on GracefulMasterTakeoverCommandHint it makes utter sense to RESET SLAVE ALL and read_only=0, and there is no sense in not doing so. - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: will apply MySQL changes to promoted master")) + AuditTopologyRecovery(topologyRecovery, "- RecoverDeadMaster: will apply MySQL changes to promoted master") { _, err := inst.ResetReplicationOperation(&promotedReplica.Key) if err != nil { @@ -938,14 +938,14 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate if orcraft.IsRaftEnabled() { for _, kvPair := range kvPairs { _, err := orcraft.PublishCommand("put-key-value", kvPair) - log.Errore(err) + _ = log.Errore(err) } // since we'll be affecting 3rd party tools here, we _prefer_ to mitigate re-applying // of the put-key-value event upon startup. We _recommend_ a snapshot in the near future. 
- go orcraft.PublishCommand("async-snapshot", "") + go func() { _, _ = orcraft.PublishCommand("async-snapshot", "") }() } else { err := kv.PutKVPairs(kvPairs) - log.Errore(err) + _ = log.Errore(err) } { AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Distributing KV %+v", kvPairs)) @@ -956,13 +956,13 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate promotedReplica.Key.Hostname, promotedReplica.Key.Port, analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port, ); err != nil { - log.Errorf("ProxySQL post-failover failed: %v", err) + _ = log.Errorf("ProxySQL post-failover failed: %v", err) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("ProxySQL post-failover failed: %v", err)) } if config.Config.MasterFailoverDetachReplicaMasterHost { postponedFunction := func() error { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: detaching master host on promoted master")) - inst.DetachReplicaMasterHost(&promotedReplica.Key) + AuditTopologyRecovery(topologyRecovery, "- RecoverDeadMaster: detaching master host on promoted master") + _, _ = inst.DetachReplicaMasterHost(&promotedReplica.Key) return nil } topologyRecovery.AddPostponedFunction(postponedFunction, fmt.Sprintf("RecoverDeadMaster, detaching promoted master host %+v", promotedReplica.Key)) @@ -973,14 +973,14 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: updating cluster_alias: %v -> %v", before, after)) //~~~inst.ReplaceClusterName(before, after) if alias := analysisEntry.ClusterDetails.ClusterAlias; alias != "" { - inst.SetClusterAlias(promotedReplica.Key.StringCode(), alias) + _ = inst.SetClusterAlias(promotedReplica.Key.StringCode(), alias) } else { - inst.ReplaceAliasClusterName(before, after) + _ = inst.ReplaceAliasClusterName(before, after) } return nil }() - 
attributes.SetGeneralAttribute(analysisEntry.ClusterDetails.ClusterDomain, promotedReplica.Key.StringCode()) + _ = attributes.SetGeneralAttribute(analysisEntry.ClusterDetails.ClusterDomain, promotedReplica.Key.StringCode()) if !skipProcesses { // Execute post master-failover processes @@ -1136,7 +1136,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce failedInstanceKey := &analysisEntry.AnalyzedInstanceKey recoveryResolved := false - inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, "problem found; will recover") + _ = inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, "problem found; will recover") if !skipProcesses { if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil { return nil, topologyRecovery.AddError(err) @@ -1155,7 +1155,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce } // We have a candidate AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: will attempt a candidate intermediate master: %+v", candidateSiblingOfIntermediateMaster.Key)) - relocatedReplicas, candidateSibling, err, errs := inst.RelocateReplicas(failedInstanceKey, &candidateSiblingOfIntermediateMaster.Key, "") + relocatedReplicas, candidateSibling, errs, err := inst.RelocateReplicas(failedInstanceKey, &candidateSiblingOfIntermediateMaster.Key, "") topologyRecovery.AddErrors(errs) topologyRecovery.ParticipatingInstanceKeys.AddKey(candidateSiblingOfIntermediateMaster.Key) @@ -1171,7 +1171,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce recoveryResolved = true successorInstance = candidateSibling - inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated %d replicas under candidate sibling: %+v; %d errors: %+v", len(relocatedReplicas), candidateSibling.Key, len(errs), errs)) + _ = 
inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated %d replicas under candidate sibling: %+v; %d errors: %+v", len(relocatedReplicas), candidateSibling.Key, len(errs), errs)) } } // Plan A: find a replacement intermediate master in same Data Center @@ -1179,11 +1179,11 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce relocateReplicasToCandidateSibling() } if !recoveryResolved { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: will next attempt regrouping of replicas")) + AuditTopologyRecovery(topologyRecovery, "- RecoverDeadIntermediateMaster: will next attempt regrouping of replicas") // Plan B: regroup (we wish to reduce cross-DC replication streams) lostReplicas, _, _, _, regroupPromotedReplica, regroupError := inst.RegroupReplicas(failedInstanceKey, true, nil, nil) if regroupError != nil { - topologyRecovery.AddError(regroupError) + _ = topologyRecovery.AddError(regroupError) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: regroup failed on: %+v", regroupError)) } if regroupPromotedReplica != nil { @@ -1197,7 +1197,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce } // Plan C: try replacement intermediate master in other DC... 
if candidateSiblingOfIntermediateMaster != nil && candidateSiblingOfIntermediateMaster.DataCenter != intermediateMasterInstance.DataCenter { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: will next attempt relocating to another DC server")) + AuditTopologyRecovery(topologyRecovery, "- RecoverDeadIntermediateMaster: will next attempt relocating to another DC server") relocateReplicasToCandidateSibling() } } @@ -1210,7 +1210,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce // So, match up all that's left, plan D AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: will next attempt to relocate up from %+v", *failedInstanceKey)) - relocatedReplicas, masterInstance, err, errs := inst.RelocateReplicas(failedInstanceKey, &analysisEntry.AnalyzedInstanceMasterKey, "") + relocatedReplicas, masterInstance, errs, err := inst.RelocateReplicas(failedInstanceKey, &analysisEntry.AnalyzedInstanceMasterKey, "") topologyRecovery.AddErrors(errs) topologyRecovery.ParticipatingInstanceKeys.AddKey(analysisEntry.AnalyzedInstanceMasterKey) @@ -1220,10 +1220,10 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce // There could have been a local replica taking over its siblings. We'd like to consider that one as successor. 
successorInstance = masterInstance } - inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated replicas under: %+v %d errors: %+v", successorInstance.Key, len(errs), errs)) + _ = inst.AuditOperation("recover-dead-intermediate-master", failedInstanceKey, fmt.Sprintf("Relocated replicas under: %+v %d errors: %+v", successorInstance.Key, len(errs), errs)) } else { err = log.Errorf("topology_recovery: RecoverDeadIntermediateMaster failed to match up any replica from %+v", *failedInstanceKey) - topologyRecovery.AddError(err) + _ = topologyRecovery.AddError(err) } } if !recoveryResolved { @@ -1239,7 +1239,7 @@ func RecoverDeadReplicationGroupMemberWithReplicas(topologyRecovery *TopologyRec topologyRecovery.Type = ReplicationGroupMemberRecovery analysisEntry := &topologyRecovery.AnalysisEntry failedGroupMemberInstanceKey := &analysisEntry.AnalyzedInstanceKey - inst.AuditOperation("recover-dead-replication-group-member-with-replicas", failedGroupMemberInstanceKey, "problem found; will recover") + _ = inst.AuditOperation("recover-dead-replication-group-member-with-replicas", failedGroupMemberInstanceKey, "problem found; will recover") if !skipProcesses { if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil { return nil, topologyRecovery.AddError(err) @@ -1258,11 +1258,11 @@ func RecoverDeadReplicationGroupMemberWithReplicas(topologyRecovery *TopologyRec AuditTopologyRecovery(topologyRecovery, "Finding a candidate group member to relocate replicas to") candidateGroupMemberInstanceKey := &groupMembers[rand.Intn(len(failedGroupMember.ReplicationGroupMembers.GetInstanceKeys()))] AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Found group member %+v", candidateGroupMemberInstanceKey)) - relocatedReplicas, successorInstance, err, errs := inst.RelocateReplicas(failedGroupMemberInstanceKey, candidateGroupMemberInstanceKey, "") + relocatedReplicas, successorInstance, 
errs, err := inst.RelocateReplicas(failedGroupMemberInstanceKey, candidateGroupMemberInstanceKey, "") topologyRecovery.AddErrors(errs) if len(relocatedReplicas) != len(failedGroupMember.Replicas.GetInstanceKeys()) { AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadReplicationGroupMemberWithReplicas: failed to move all replicas to candidate group member (%+v)", candidateGroupMemberInstanceKey)) - return nil, topologyRecovery.AddError(errors.New(fmt.Sprintf("RecoverDeadReplicationGroupMemberWithReplicas: Unable to relocate replicas to +%v", candidateGroupMemberInstanceKey))) + return nil, topologyRecovery.AddError(fmt.Errorf("RecoverDeadReplicationGroupMemberWithReplicas: Unable to relocate replicas to +%v", candidateGroupMemberInstanceKey)) } AuditTopologyRecovery(topologyRecovery, "All replicas successfully relocated") resolveRecovery(topologyRecovery, successorInstance) @@ -1272,7 +1272,7 @@ func RecoverDeadReplicationGroupMemberWithReplicas(topologyRecovery *TopologyRec // checkAndRecoverDeadIntermediateMaster checks a given analysis, decides whether to take action, and possibly takes action // Returns true when action was taken. 
func checkAndRecoverDeadIntermediateMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (bool, *TopologyRecovery, error) { - if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedIntermediateMasterRecovery) { + if !forceInstanceRecovery && !analysisEntry.ClusterDetails.HasAutomatedIntermediateMasterRecovery { return false, nil, nil } topologyRecovery, err := AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery) @@ -1310,7 +1310,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) if otherCoMaster == nil || !found { return nil, lostReplicas, topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not read info for co-master %+v of %+v", *otherCoMasterKey, *failedInstanceKey)) } - inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "problem found; will recover") + _ = inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "problem found; will recover") if !skipProcesses { if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil { return nil, lostReplicas, topologyRecovery.AddError(err) @@ -1319,7 +1319,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadCoMaster: will recover %+v", *failedInstanceKey)) - var coMasterRecoveryType MasterRecoveryType = MasterRecoveryPseudoGTID + var coMasterRecoveryType = MasterRecoveryPseudoGTID if analysisEntry.OracleGTIDImmediateTopology || analysisEntry.MariaDBGTIDImmediateTopology { coMasterRecoveryType = MasterRecoveryGTID } @@ -1337,7 +1337,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) lostReplicas, _, _, cannotReplicateReplicas, promotedReplica, err = 
inst.RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, nil) } } - topologyRecovery.AddError(err) + _ = topologyRecovery.AddError(err) lostReplicas = append(lostReplicas, cannotReplicateReplicas...) mustPromoteOtherCoMaster := config.Config.CoMasterRecoveryMustPromoteOtherCoMaster @@ -1356,11 +1356,11 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) // We are allowed to promote any server promotedReplica, err = replacePromotedReplicaWithCandidate(topologyRecovery, failedInstanceKey, promotedReplica, nil) } - topologyRecovery.AddError(err) + _ = topologyRecovery.AddError(err) } if promotedReplica != nil { if mustPromoteOtherCoMaster && !promotedReplica.Key.Equals(otherCoMasterKey) { - topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not manage to promote other-co-master %+v; was only able to promote %+v; mustPromoteOtherCoMaster is true (either CoMasterRecoveryMustPromoteOtherCoMaster is true, or co-master is writeable), therefore failing", *otherCoMasterKey, promotedReplica.Key)) + _ = topologyRecovery.AddError(log.Errorf("RecoverDeadCoMaster: could not manage to promote other-co-master %+v; was only able to promote %+v; mustPromoteOtherCoMaster is true (either CoMasterRecoveryMustPromoteOtherCoMaster is true, or co-master is writeable), therefore failing", *otherCoMasterKey, promotedReplica.Key)) promotedReplica = nil } } @@ -1391,7 +1391,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) // So in the case we promoted not-the-other-co-master, we issue a detach-replica-master-host, which is a reversible operation if promotedReplica != nil && !promotedReplica.Key.Equals(otherCoMasterKey) { _, err = inst.DetachReplicaMasterHost(&promotedReplica.Key) - topologyRecovery.AddError(log.Errore(err)) + _ = topologyRecovery.AddError(log.Errore(err)) } if promotedReplica != nil && len(lostReplicas) > 0 && 
config.Config.DetachLostReplicasAfterMasterFailover { @@ -1399,7 +1399,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadCoMaster: lost %+v replicas during recovery process; detaching them", len(lostReplicas))) for _, replica := range lostReplicas { replica := replica - inst.DetachReplicaMasterHost(&replica.Key) + _, _ = inst.DetachReplicaMasterHost(&replica.Key) } return nil } @@ -1407,11 +1407,11 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) } func() error { - inst.BeginDowntime(inst.NewDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) + _ = inst.BeginDowntime(inst.NewDowntime(failedInstanceKey, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) acknowledgeInstanceFailureDetection(&analysisEntry.AnalyzedInstanceKey) for _, replica := range lostReplicas { replica := replica - inst.BeginDowntime(inst.NewDowntime(&replica.Key, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) + _ = inst.BeginDowntime(inst.NewDowntime(&replica.Key, inst.GetMaintenanceOwner(), inst.DowntimeLostInRecoveryMessage, time.Duration(config.LostInRecoveryDowntimeSeconds)*time.Second)) } return nil }() @@ -1423,7 +1423,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) // Returns true when action was taken. 
func checkAndRecoverDeadCoMaster(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (bool, *TopologyRecovery, error) { failedInstanceKey := &analysisEntry.AnalyzedInstanceKey - if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedMasterRecovery) { + if !forceInstanceRecovery && !analysisEntry.ClusterDetails.HasAutomatedMasterRecovery { return false, nil, nil } topologyRecovery, err := AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery) @@ -1437,9 +1437,9 @@ func checkAndRecoverDeadCoMaster(analysisEntry inst.ReplicationAnalysis, candida promotedReplica, lostReplicas, err := RecoverDeadCoMaster(topologyRecovery, skipProcesses) resolveRecovery(topologyRecovery, promotedReplica) if promotedReplica == nil { - inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "Failure: no replica promoted.") + _ = inst.AuditOperation("recover-dead-co-master", failedInstanceKey, "Failure: no replica promoted.") } else { - inst.AuditOperation("recover-dead-co-master", failedInstanceKey, fmt.Sprintf("promoted: %+v", promotedReplica.Key)) + _ = inst.AuditOperation("recover-dead-co-master", failedInstanceKey, fmt.Sprintf("promoted: %+v", promotedReplica.Key)) } topologyRecovery.LostReplicas.AddInstances(lostReplicas) if promotedReplica != nil { @@ -1450,8 +1450,8 @@ func checkAndRecoverDeadCoMaster(analysisEntry inst.ReplicationAnalysis, candida recoverDeadCoMasterSuccessCounter.Inc(1) if config.Config.ApplyMySQLPromotionAfterMasterFailover { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: will apply MySQL changes to promoted master")) - inst.SetReadOnly(&promotedReplica.Key, false) + AuditTopologyRecovery(topologyRecovery, "- RecoverDeadMaster: will apply MySQL changes to promoted master") + _, _ = inst.SetReadOnly(&promotedReplica.Key, false) } if !skipProcesses { // Execute post intermediate-master-failover 
processes @@ -1478,7 +1478,7 @@ func checkAndRecoverNonWriteableMaster(analysisEntry inst.ReplicationAnalysis, c return false, nil, err } - inst.AuditOperation("recover-non-writeable-master", &analysisEntry.AnalyzedInstanceKey, "problem found; will recover") + _ = inst.AuditOperation("recover-non-writeable-master", &analysisEntry.AnalyzedInstanceKey, "problem found; will recover") if !skipProcesses { if err := executeProcesses(config.Config.PreFailoverProcesses, "PreFailoverProcesses", topologyRecovery, true); err != nil { return false, topologyRecovery, topologyRecovery.AddError(err) @@ -1535,7 +1535,7 @@ func recoverSemiSyncReplicas(topologyRecovery *TopologyRecovery, analysisEntry i // Disable semi-sync master on all replicas; this is to avoid semi-sync failures on the replicas (rpl_semi_sync_master_no_tx) // and to make it consistent with the logic in SetReadOnly for _, replica := range replicas { - inst.MaybeDisableSemiSyncMaster(replica) // it's okay if this fails + _, _ = inst.MaybeDisableSemiSyncMaster(replica) // it's okay if this fails } // Take action: we first enable and then disable (two loops) in order to avoid "locked master" scenarios @@ -1577,7 +1577,7 @@ func checkAndRecoverDeadGroupMemberWithReplicas(analysisEntry inst.ReplicationAn // Don't proceed with recovery unless it was forced or automatic intermediate source recovery is enabled. // We consider failed group members akin to failed intermediate masters, so we re-use the configuration for // intermediates. - if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedIntermediateMasterRecovery) { + if !forceInstanceRecovery && !analysisEntry.ClusterDetails.HasAutomatedIntermediateMasterRecovery { return false, nil, nil } // Try to record the recovery. It it fails to be recorded, it because it is already being dealt with. 
@@ -1615,7 +1615,7 @@ func emergentlyReadTopologyInstance(instanceKey *inst.InstanceKey, analysisCode return nil, nil } instance, err = inst.ReadTopologyInstance(instanceKey) - inst.AuditOperation("emergently-read-topology-instance", instanceKey, string(analysisCode)) + _ = inst.AuditOperation("emergently-read-topology-instance", instanceKey, string(analysisCode)) return instance, err } @@ -1654,8 +1654,8 @@ func emergentlyRestartReplicationOnTopologyInstance(instanceKey *inst.InstanceKe return } - inst.RestartReplicationQuick(instance, instanceKey) - inst.AuditOperation("emergently-restart-replication-topology-instance", instanceKey, string(analysisCode)) + _ = inst.RestartReplicationQuick(instance, instanceKey) + _ = inst.AuditOperation("emergently-restart-replication-topology-instance", instanceKey, string(analysisCode)) }) } @@ -1813,7 +1813,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // Check for recovery being disabled globally if recerr != nil { // Unexpected. 
Shouldn't get this - log.Errorf("Unable to determine if recovery is disabled globally: %v", recerr) + _ = log.Errorf("Unable to determine if recovery is disabled globally: %v", recerr) } checkAndRecoverFunction, isActionableRecovery := getCheckAndRecoverFunction(analysisEntry.Analysis, &analysisEntry.AnalyzedInstanceKey) analysisEntry.IsActionableRecovery = isActionableRecovery @@ -1823,7 +1823,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // Unhandled problem type if analysisEntry.Analysis != inst.NoProblem { if util.ClearToLog("executeCheckAndRecoverFunction", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Warningf("executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; key: %+v", + _ = log.Warningf("executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; key: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) } } @@ -1857,7 +1857,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand } } if err != nil { - log.Errorf("executeCheckAndRecoverFunction: error on failure detection: %+v", err) + _ = log.Errorf("executeCheckAndRecoverFunction: error on failure detection: %+v", err) return false, nil, err } // We don't mind whether detection really executed the processes or not @@ -1892,7 +1892,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand if b, err := json.Marshal(topologyRecovery); err == nil { log.Infof("Topology recovery: %+v", string(b)) } else { - log.Infof("Topology recovery: %+v", *topologyRecovery) + log.Infof("Topology recovery: %+v", topologyRecovery) } if !skipProcesses { if topologyRecovery.SuccessorKey == nil { @@ -1900,15 +1900,15 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand executeProcesses(config.Config.PostUnsuccessfulFailoverProcesses, "PostUnsuccessfulFailoverProcesses", topologyRecovery, false) } else { // Execute general 
post failover processes - inst.EndDowntime(topologyRecovery.SuccessorKey) + _, _ = inst.EndDowntime(topologyRecovery.SuccessorKey) executeProcesses(config.Config.PostFailoverProcesses, "PostFailoverProcesses", topologyRecovery, false) } } - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Waiting for %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) + AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Waiting for %d postponed functions", topologyRecovery.Len())) topologyRecovery.Wait() - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) - if topologyRecovery.PostponedFunctionsContainer.Len() > 0 { - AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed postponed functions: %+v", strings.Join(topologyRecovery.PostponedFunctionsContainer.Descriptions(), ", "))) + AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed %d postponed functions", topologyRecovery.Len())) + if topologyRecovery.Len() > 0 { + AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed postponed functions: %+v", strings.Join(topologyRecovery.Descriptions(), ", "))) } return recoveryAttempted, topologyRecovery, err } @@ -1991,10 +1991,10 @@ func ForceExecuteRecovery(analysisEntry inst.ReplicationAnalysis, candidateInsta func ForceMasterFailover(clusterName string) (topologyRecovery *TopologyRecovery, err error) { clusterMasters, err := inst.ReadClusterMaster(clusterName) if err != nil { - return nil, fmt.Errorf("Cannot deduce cluster master for %+v", clusterName) + return nil, fmt.Errorf("cannot deduce cluster master for %+v", clusterName) } if len(clusterMasters) != 1 { - return nil, fmt.Errorf("Cannot deduce cluster master for %+v", clusterName) + return nil, fmt.Errorf("cannot deduce cluster master for %+v", clusterName) } clusterMaster := clusterMasters[0] @@ -2031,7 +2031,7 @@ func ForceMasterTakeover(clusterName string, destination *inst.Instance) 
(topolo clusterMaster := clusterMasters[0] if !destination.MasterKey.Equals(&clusterMaster.Key) { - return nil, fmt.Errorf("You may only promote a direct child of the master %+v. The master of %+v is %+v.", clusterMaster.Key, destination.Key, destination.MasterKey) + return nil, fmt.Errorf("you may only promote a direct child of the master %+v; the master of %+v is %+v", clusterMaster.Key, destination.Key, destination.MasterKey) } log.Infof("Will demote %+v and promote %+v instead", clusterMaster.Key, destination.Key) @@ -2138,12 +2138,12 @@ func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey, return nil, nil, fmt.Errorf("Sanity check failure. It seems like the designated instance %+v does not replicate from the master %+v (designated instance's master key is %+v). This error is strange. Panicking", designatedInstance.Key, clusterMaster.Key, designatedInstance.MasterKey) } if !designatedInstance.HasReasonableMaintenanceReplicationLag() { - return nil, nil, fmt.Errorf("Desginated instance %+v seems to be lagging too much for this operation. 
Aborting.", designatedInstance.Key) + return nil, nil, fmt.Errorf("designated instance %+v seems to be lagging too much for this operation", designatedInstance.Key) } if len(clusterMasterDirectReplicas) > 1 { log.Infof("GracefulMasterTakeover: Will let %+v take over its siblings", designatedInstance.Key) - relocatedReplicas, _, err, _ := inst.RelocateReplicas(&clusterMaster.Key, &designatedInstance.Key, "") + relocatedReplicas, _, _, err := inst.RelocateReplicas(&clusterMaster.Key, &designatedInstance.Key, "") if len(relocatedReplicas) != len(clusterMasterDirectReplicas)-1 { // We are unable to make designated instance master of all its siblings relocatedReplicasKeyMap := inst.NewInstanceKeyMap() @@ -2160,7 +2160,7 @@ func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey, } if directReplica.IsDowntimed { // obviously we skip this one - log.Warningf("GracefulMasterTakeover: unable to relocate %+v below designated %+v, but since it is downtimed (downtime reason: %s) I will proceed", directReplica.Key, designatedInstance.Key, directReplica.DowntimeReason) + _ = log.Warningf("GracefulMasterTakeover: unable to relocate %+v below designated %+v, but since it is downtimed (downtime reason: %s) I will proceed", directReplica.Key, designatedInstance.Key, directReplica.DowntimeReason) continue } return nil, nil, fmt.Errorf("Desginated instance %+v cannot take over all of its siblings. Error: %+v", designatedInstance.Key, err) @@ -2208,10 +2208,10 @@ func GracefulMasterTakeover(clusterName string, designatedKey *inst.InstanceKey, if topologyRecovery.SuccessorKey == nil { // Promotion fails. // Undo setting read-only on original master. 
- inst.SetReadOnly(&clusterMaster.Key, false) + _, _ = inst.SetReadOnly(&clusterMaster.Key, false) return nil, nil, fmt.Errorf("GracefulMasterTakeover: Recovery attempted yet no replica promoted; err=%+v", err) } - var gtidHint inst.OperationGTIDHint = inst.GTIDHintNeutral + var gtidHint = inst.GTIDHintNeutral if topologyRecovery.RecoveryType == MasterRecoveryGTID { gtidHint = inst.GTIDHintForce } diff --git a/go/logic/topology_recovery_dao.go b/go/logic/topology_recovery_dao.go index 081cbf06..c4bee6eb 100644 --- a/go/logic/topology_recovery_dao.go +++ b/go/logic/topology_recovery_dao.go @@ -558,7 +558,7 @@ func readRecoveries(whereCondition string, limit string, args []interface{}) ([] topologyRecovery.AnalysisEntry.ClusterDetails.ClusterName = m.GetString("cluster_name") topologyRecovery.AnalysisEntry.ClusterDetails.ClusterAlias = m.GetString("cluster_alias") topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_slaves") - topologyRecovery.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("slave_hosts")) + _ = topologyRecovery.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("slave_hosts")) topologyRecovery.SuccessorKey = &inst.InstanceKey{} topologyRecovery.SuccessorKey.Hostname = m.GetString("successor_hostname") @@ -568,8 +568,8 @@ func readRecoveries(whereCondition string, limit string, args []interface{}) ([] topologyRecovery.AnalysisEntry.ClusterDetails.ReadRecoveryInfo() topologyRecovery.AllErrors = strings.Split(m.GetString("all_errors"), "\n") - topologyRecovery.LostReplicas.ReadCommaDelimitedList(m.GetString("lost_slaves")) - topologyRecovery.ParticipatingInstanceKeys.ReadCommaDelimitedList(m.GetString("participating_instances")) + _ = topologyRecovery.LostReplicas.ReadCommaDelimitedList(m.GetString("lost_slaves")) + _ = topologyRecovery.ParticipatingInstanceKeys.ReadCommaDelimitedList(m.GetString("participating_instances")) topologyRecovery.Acknowledged = m.GetBool("acknowledged") topologyRecovery.AcknowledgedAt = 
m.GetString("acknowledged_at") @@ -730,7 +730,7 @@ func readFailureDetections(whereCondition string, limit string, args []interface failureDetection.AnalysisEntry.ClusterDetails.ClusterName = m.GetString("cluster_name") failureDetection.AnalysisEntry.ClusterDetails.ClusterAlias = m.GetString("cluster_alias") failureDetection.AnalysisEntry.CountReplicas = m.GetUint("count_affected_slaves") - failureDetection.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("slave_hosts")) + _ = failureDetection.AnalysisEntry.ReadReplicaHostsFromString(m.GetString("slave_hosts")) failureDetection.AnalysisEntry.StartActivePeriod = m.GetString("start_active_period") failureDetection.RelatedRecoveryId = m.GetInt64("related_recovery_id") diff --git a/go/metrics/graphite.go b/go/metrics/graphite.go index b12acec2..e89226c5 100644 --- a/go/metrics/graphite.go +++ b/go/metrics/graphite.go @@ -44,10 +44,10 @@ func InitGraphiteMetrics() error { } graphitePathHostname := process.ThisHostname if config.Config.GraphiteConvertHostnameDotsToUnderscores { - graphitePathHostname = strings.Replace(graphitePathHostname, ".", "_", -1) + graphitePathHostname = strings.ReplaceAll(graphitePathHostname, ".", "_") } graphitePath := config.Config.GraphitePath - graphitePath = strings.Replace(graphitePath, "{hostname}", graphitePathHostname, -1) + graphitePath = strings.ReplaceAll(graphitePath, "{hostname}", graphitePathHostname) log.Debugf("Will log to graphite on %+v, %+v", config.Config.GraphiteAddr, graphitePath) diff --git a/go/os/process.go b/go/os/process.go index 0bf89f90..9f3247ef 100644 --- a/go/os/process.go +++ b/go/os/process.go @@ -18,7 +18,6 @@ package os import ( "fmt" - "io/ioutil" "os" "os/exec" "strings" @@ -39,7 +38,7 @@ func CommandRun(commandText string, env []string, arguments ...string) error { log.Infof("CommandRun(%v,%+v)", commandText, arguments) cmd, shellScript, err := generateShellScript(commandText, env, arguments...) 
- defer os.Remove(shellScript) + defer func() { _ = os.Remove(shellScript) }() if err != nil { return log.Errore(err) } @@ -53,7 +52,7 @@ func CommandRun(commandText string, env []string, arguments ...string) error { // Did the command fail because of an unsuccessful exit code if exitError, ok := err.(*exec.ExitError); ok { waitStatus = exitError.Sys().(syscall.WaitStatus) - log.Errorf("CommandRun: failed. exit status %d", waitStatus.ExitStatus()) + _ = log.Errorf("CommandRun: failed. exit status %d", waitStatus.ExitStatus()) } return log.Errore(fmt.Errorf("(%s) %s", err.Error(), cmdOutput)) @@ -74,12 +73,15 @@ func generateShellScript(commandText string, env []string, arguments ...string) shell := config.Config.ProcessesShellCommand commandBytes := []byte(commandText) - tmpFile, err := ioutil.TempFile("", "orchestrator-process-cmd-") + tmpFile, err := os.CreateTemp("", "orchestrator-process-cmd-") if err != nil { return nil, "", log.Errorf("generateShellScript() failed to create TempFile: %v", err.Error()) } + _ = tmpFile.Close() // write commandText to temporary file - ioutil.WriteFile(tmpFile.Name(), commandBytes, 0640) + if err := os.WriteFile(tmpFile.Name(), commandBytes, 0640); err != nil { + return nil, "", err + } shellArguments := append([]string{}, tmpFile.Name()) shellArguments = append(shellArguments, arguments...) diff --git a/go/os/unixcheck.go b/go/os/unixcheck.go index 0a188aa8..c0b23121 100644 --- a/go/os/unixcheck.go +++ b/go/os/unixcheck.go @@ -42,20 +42,20 @@ func UserInGroups(authUser string, powerAuthGroups []string) bool { // The user not being known is not an error so don't report this. 
// ERROR Failed to lookup user "simon": user: unknown user simon if !strings.Contains(err.Error(), "unknown user") { - log.Errorf("Failed to lookup user %q: %v", authUser, err) + _ = log.Errorf("Failed to lookup user %q: %v", authUser, err) } return false } gids, err := currentUser.GroupIds() if err != nil { - log.Errorf("Failed to lookup groupids for user %q: %v", authUser, err) + _ = log.Errorf("Failed to lookup groupids for user %q: %v", authUser, err) return false } // get the group name from the id and check if the name is in powerGroupMap for _, gid := range gids { group, err := user.LookupGroupId(gid) if err != nil { - log.Errorf("Failed to lookup group id for gid %s: %v", gid, err) // yes gids are strings! + _ = log.Errorf("Failed to lookup group id for gid %s: %v", gid, err) // yes gids are strings! return false } diff --git a/go/process/election_dao.go b/go/process/election_dao.go index ae5376f1..26f480b4 100644 --- a/go/process/election_dao.go +++ b/go/process/election_dao.go @@ -129,7 +129,8 @@ func Reelect() error { } // ElectedNode returns the details of the elected node, as well as answering the question "is this process the elected one"? 
-func ElectedNode() (node NodeHealth, isElected bool, err error) { +func ElectedNode() (node *NodeHealth, isElected bool, err error) { + node = &NodeHealth{} query := ` select hostname, diff --git a/go/process/health.go b/go/process/health.go index e60f977b..4ceeb4bc 100644 --- a/go/process/health.go +++ b/go/process/health.go @@ -75,7 +75,7 @@ type HealthStatus struct { Hostname string Token string IsActiveNode bool - ActiveNode NodeHealth + ActiveNode *NodeHealth Error error AvailableNodes [](*NodeHealth) RaftLeader string @@ -89,7 +89,7 @@ type OrchestratorExecutionMode string const ( OrchestratorExecutionCliMode OrchestratorExecutionMode = "CLIMode" - OrchestratorExecutionHttpMode = "HttpMode" + OrchestratorExecutionHttpMode OrchestratorExecutionMode = "HttpMode" ) var continuousRegistrationOnce sync.Once @@ -111,7 +111,7 @@ func HealthTest() (health *HealthStatus, err error) { return healthStatus.(*HealthStatus), nil } - health = &HealthStatus{Healthy: false, Hostname: ThisHostname, Token: util.ProcessToken.Hash} + health = &HealthStatus{Healthy: false, Hostname: ThisHostname, Token: util.ProcessToken.Hash, ActiveNode: &NodeHealth{}} defer lastHealthCheckCache.Set(cacheKey, health, cache.DefaultExpiration) if healthy, err := RegisterNode(ThisNodeHealth); err != nil { @@ -135,7 +135,7 @@ func HealthTest() (health *HealthStatus, err error) { return health, log.Errore(err) } } - health.AvailableNodes, err = ReadAvailableNodes(true) + health.AvailableNodes, _ = ReadAvailableNodes(true) return health, nil } @@ -165,7 +165,7 @@ func ContinuousRegistration(extraInfo string, command string) { tickOperation := func() { healthy, err := RegisterNode(ThisNodeHealth) if err != nil { - log.Errorf("ContinuousRegistration: RegisterNode failed: %+v", err) + _ = log.Errorf("ContinuousRegistration: RegisterNode failed: %+v", err) } if healthy { atomic.StoreInt64(&LastContinousCheckHealthy, 1) diff --git a/go/process/health_dao.go b/go/process/health_dao.go index 
b2b26c5b..3acbc5d1 100644 --- a/go/process/health_dao.go +++ b/go/process/health_dao.go @@ -37,7 +37,7 @@ func WriteRegisterNode(nodeHealth *NodeHealth) (healthy bool, err error) { } nodeHealth.onceHistory.Do(func() { - db.ExecOrchestrator(` + _, _ = db.ExecOrchestrator(` insert ignore into node_health_history (hostname, token, first_seen_active, extra_info, command, app_version) values @@ -122,7 +122,7 @@ func ExpireAvailableNodes() { config.HealthPollSeconds*5, ) if err != nil { - log.Errorf("ExpireAvailableNodes: failed to remove old entries: %+v", err) + _ = log.Errorf("ExpireAvailableNodes: failed to remove old entries: %+v", err) } } diff --git a/go/process/host.go b/go/process/host.go index 9f92f28a..a1c552c8 100644 --- a/go/process/host.go +++ b/go/process/host.go @@ -27,6 +27,6 @@ func init() { var err error ThisHostname, err = os.Hostname() if err != nil { - log.Fatalf("Cannot resolve self hostname; required. Aborting. %+v", err) + _ = log.Fatalf("Cannot resolve self hostname; required. Aborting. %+v", err) } } diff --git a/go/proxysql/client.go b/go/proxysql/client.go index a24f5151..189d65ca 100644 --- a/go/proxysql/client.go +++ b/go/proxysql/client.go @@ -54,7 +54,7 @@ func (c *Client) Exec(query string, args ...interface{}) error { if err != nil { return err } - defer db.Close() + defer func() { _ = db.Close() }() _, err = db.Exec(query, args...) if err != nil { @@ -71,7 +71,7 @@ func (c *Client) Query(query string, args ...interface{}) (*sql.Rows, *sql.DB, e } rows, err := db.Query(query, args...) 
if err != nil { - db.Close() + _ = db.Close() return nil, nil, fmt.Errorf("proxysql: query failed: %v", err) } return rows, db, nil @@ -83,7 +83,7 @@ func (c *Client) Ping() error { if err != nil { return err } - defer db.Close() + defer func() { _ = db.Close() }() if err := db.Ping(); err != nil { return fmt.Errorf("proxysql: ping failed on %s:%d: %v", c.address, c.port, err) } diff --git a/go/proxysql/hook.go b/go/proxysql/hook.go index b4f09a6e..cb73d5e5 100644 --- a/go/proxysql/hook.go +++ b/go/proxysql/hook.go @@ -72,7 +72,7 @@ func (h *Hook) PostFailover(newMasterHost string, newMasterPort int, oldMasterHo return fmt.Errorf("proxysql: post-failover LOAD TO RUNTIME failed: %v", err) } if err := h.client.Exec("SAVE MYSQL SERVERS TO DISK"); err != nil { - log.Errorf("proxysql: post-failover SAVE TO DISK failed (non-fatal): %v", err) + _ = log.Errorf("proxysql: post-failover SAVE TO DISK failed (non-fatal): %v", err) } log.Infof("proxysql: post-failover: promoted %s:%d as writer", newMasterHost, newMasterPort) return nil @@ -88,7 +88,7 @@ func buildPreFailoverSQL(action, host string, port, writerHostgroup int) (string case "none": return "", nil default: - log.Warningf("proxysql: unknown preFailoverAction '%s', defaulting to 'offline_soft'", action) + _ = log.Warningf("proxysql: unknown preFailoverAction '%s', defaulting to 'offline_soft'", action) return "UPDATE mysql_servers SET status='OFFLINE_SOFT' WHERE hostname=? AND port=? AND hostgroup_id=?", args } } diff --git a/go/proxysql/init.go b/go/proxysql/init.go index aed896f4..ce8c1d7e 100644 --- a/go/proxysql/init.go +++ b/go/proxysql/init.go @@ -40,7 +40,7 @@ func InitHook() { config.Config.ProxySQLReaderHostgroup, ) } else if config.Config.ProxySQLAdminAddress != "" && config.Config.ProxySQLWriterHostgroup == 0 { - log.Warningf("ProxySQL: ProxySQLAdminAddress is set but ProxySQLWriterHostgroup is 0 (unconfigured). 
ProxySQL hooks will be inactive.") + _ = log.Warningf("ProxySQL: ProxySQLAdminAddress is set but ProxySQLWriterHostgroup is 0 (unconfigured). ProxySQL hooks will be inactive.") } defaultHook.Store(hook) }) diff --git a/go/proxysql/topology.go b/go/proxysql/topology.go index b0f5213b..75cb0848 100644 --- a/go/proxysql/topology.go +++ b/go/proxysql/topology.go @@ -29,8 +29,8 @@ func (c *Client) GetServers() ([]ServerEntry, error) { if err != nil { return nil, fmt.Errorf("proxysql: GetServers: %v", err) } - defer db.Close() - defer rows.Close() + defer func() { _ = db.Close() }() + defer func() { _ = rows.Close() }() var servers []ServerEntry for rows.Next() { @@ -60,8 +60,8 @@ func (c *Client) GetServersByHostgroup(hostgroupID int) ([]ServerEntry, error) { if err != nil { return nil, fmt.Errorf("proxysql: GetServersByHostgroup(%d): %v", hostgroupID, err) } - defer db.Close() - defer rows.Close() + defer func() { _ = db.Close() }() + defer func() { _ = rows.Close() }() var servers []ServerEntry for rows.Next() { diff --git a/go/raft/file_snapshot.go b/go/raft/file_snapshot.go index 057ef5ff..c8a9e18c 100644 --- a/go/raft/file_snapshot.go +++ b/go/raft/file_snapshot.go @@ -5,11 +5,9 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/proxysql/golib/log" "hash" "hash/crc64" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -17,6 +15,7 @@ import ( "time" "github.com/hashicorp/raft" + "github.com/proxysql/golib/log" ) const ( @@ -101,10 +100,7 @@ func NewFileSnapshotStoreWithLogger(base string, retain int) (*FileSnapshotStore // NewFileSnapshotStore creates a new FileSnapshotStore based // on a base directory. The `retain` parameter controls how many // snapshots are retained. Must be at least 1. 
-func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { - if logOutput == nil { - logOutput = os.Stderr - } +func NewFileSnapshotStore(base string, retain int, _ io.Writer) (*FileSnapshotStore, error) { return NewFileSnapshotStoreWithLogger(base, retain) } @@ -209,7 +205,7 @@ func (f *FileSnapshotStore) List() ([]*raft.SnapshotMeta, error) { // getSnapshots returns all the known snapshots. func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { // Get the eligible snapshots - snapshots, err := ioutil.ReadDir(f.path) + snapshots, err := os.ReadDir(f.path) if err != nil { _ = log.Errorf("snapshot: Failed to scan snapshot dir: %v", err) return nil, err @@ -255,7 +251,7 @@ func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { if err != nil { return nil, err } - defer fh.Close() + defer func() { _ = fh.Close() }() // Buffer the file IO buffered := bufio.NewReader(fh) @@ -293,23 +289,23 @@ func (f *FileSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, _, err = io.Copy(stateHash, fh) if err != nil { _ = log.Errorf("snapshot: Failed to read state file: %v", err) - fh.Close() + _ = fh.Close() return nil, nil, err } // Verify the hash computed := stateHash.Sum(nil) - if bytes.Compare(meta.CRC, computed) != 0 { + if !bytes.Equal(meta.CRC, computed) { _ = log.Errorf("snapshot: CRC checksum failed (stored: %v computed: %v)", meta.CRC, computed) - fh.Close() + _ = fh.Close() return nil, nil, fmt.Errorf("CRC mismatch") } // Seek to the start if _, err := fh.Seek(0, 0); err != nil { _ = log.Errorf("snapshot: State file seek failed: %v", err) - fh.Close() + _ = fh.Close() return nil, nil, err } @@ -344,7 +340,7 @@ func (f *FileSnapshotStore) ReapSnapshots(currentSnapshotMeta *fileSnapshotMeta) for _, snapshot := range snapshots { if snapshot.Term > currentSnapshotMeta.Term || snapshot.Term == currentSnapshotMeta.Term && snapshot.Index > currentSnapshotMeta.Index { - 
reapSnapshot(snapshot) + _ = reapSnapshot(snapshot) deprecatedSnapshotsReaped = true } } @@ -358,7 +354,7 @@ func (f *FileSnapshotStore) ReapSnapshots(currentSnapshotMeta *fileSnapshotMeta) } } for i := f.retain; i < len(snapshots); i++ { - reapSnapshot(snapshots[i]) + _ = reapSnapshot(snapshots[i]) } return nil } @@ -462,11 +458,11 @@ func (s *FileSnapshotSink) writeMeta() error { if err != nil { return err } - defer fh.Close() + defer func() { _ = fh.Close() }() // Buffer the file IO buffered := bufio.NewWriter(fh) - defer buffered.Flush() + defer func() { _ = buffered.Flush() }() // Write out as JSON enc := json.NewEncoder(buffered) diff --git a/go/raft/fsm.go b/go/raft/fsm.go index bfdbd31d..0a0c3d0d 100644 --- a/go/raft/fsm.go +++ b/go/raft/fsm.go @@ -33,7 +33,7 @@ type fsm Store func (f *fsm) Apply(l *raft.Log) interface{} { var c storeCommand if err := json.Unmarshal(l.Data, &c); err != nil { - log.Errorf("failed to unmarshal command: %s", err.Error()) + _ = log.Errorf("failed to unmarshal command: %s", err.Error()) } if c.Op == YieldCommand { @@ -88,7 +88,7 @@ func (f *fsm) Snapshot() (raft.FSMSnapshot, error) { // Restore restores freno state func (f *fsm) Restore(rc io.ReadCloser) error { - defer rc.Close() + defer func() { _ = rc.Close() }() return f.snapshotCreatorApplier.Restore(rc) } diff --git a/go/raft/http_client.go b/go/raft/http_client.go index 4c8ae9cc..a629edc2 100644 --- a/go/raft/http_client.go +++ b/go/raft/http_client.go @@ -19,7 +19,7 @@ package orcraft import ( "crypto/tls" "fmt" - "io/ioutil" + "io" "net" "net/http" "strings" @@ -89,7 +89,7 @@ func setupHttpClient() error { func HttpGetLeader(path string) (response []byte, err error) { leaderURI := LeaderURI.Get() if leaderURI == "" { - return nil, fmt.Errorf("Raft leader URI unknown") + return nil, fmt.Errorf("raft leader URI unknown") } leaderAPI := leaderURI if config.Config.URLPrefix != "" { @@ -101,6 +101,9 @@ func HttpGetLeader(path string) (response []byte, err error) { url := 
fmt.Sprintf("%s/%s", leaderAPI, path) req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } switch strings.ToLower(config.Config.AuthenticationMethod) { case "basic", "multi": req.SetBasicAuth(config.Config.HTTPAuthUser, config.Config.HTTPAuthPassword) @@ -110,9 +113,9 @@ func HttpGetLeader(path string) (response []byte, err error) { if err != nil { return nil, err } - defer res.Body.Close() + defer func() { _ = res.Body.Close() }() - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) if err != nil { return nil, err } diff --git a/go/raft/raft.go b/go/raft/raft.go index 1e1c4e1d..f129fad1 100644 --- a/go/raft/raft.go +++ b/go/raft/raft.go @@ -46,7 +46,7 @@ const ( raftTimeout = 10 * time.Second ) -var RaftNotRunning error = fmt.Errorf("raft is not configured/running") +var ErrRaftNotRunning error = fmt.Errorf("raft is not configured/running") var store *Store var raftSetupComplete int64 var ThisHostname string @@ -155,12 +155,12 @@ func Setup(applier CommandApplier, snapshotCreatorApplier SnapshotCreatorApplier go func() { for isTurnedLeader := range leaderCh { if isTurnedLeader { - PublishCommand("leader-uri", thisLeaderURI) + _, _ = PublishCommand("leader-uri", thisLeaderURI) } } }() - setupHttpClient() + _ = setupHttpClient() atomic.StoreInt64(&raftSetupComplete, 1) return nil @@ -183,7 +183,7 @@ func normalizeRaftHostnameIP(host string) (string, error) { ips, err := net.LookupIP(host) if err != nil { // resolve failed. But we don't want to fail the entire operation for that - log.Errore(err) + _ = log.Errore(err) return host, nil } // resolve success! 
@@ -268,18 +268,18 @@ func Snapshot() error { func AsyncSnapshot() error { asyncDuration := (time.Duration(rand.Int63()) % asyncSnapshotTimeframe) go time.AfterFunc(asyncDuration, func() { - Snapshot() + _ = Snapshot() }) return nil } func StepDown() { - getRaft().StepDown() + _ = getRaft().StepDown() } func Yield() error { if !IsRaftEnabled() { - return RaftNotRunning + return ErrRaftNotRunning } return getRaft().Yield() } @@ -294,14 +294,14 @@ func GetRaftAdvertise() string { func GetPeers() ([]string, error) { if !IsRaftEnabled() { - return []string{}, RaftNotRunning + return []string{}, ErrRaftNotRunning } return store.peerStore.Peers() } func IsPeer(peer string) (bool, error) { if !IsRaftEnabled() { - return false, RaftNotRunning + return false, ErrRaftNotRunning } return (store.raftBind == peer), nil } @@ -309,7 +309,7 @@ func IsPeer(peer string) (bool, error) { // PublishCommand will distribute a command across the group func PublishCommand(op string, value interface{}) (response interface{}, err error) { if !IsRaftEnabled() { - return nil, RaftNotRunning + return nil, ErrRaftNotRunning } b, err := json.Marshal(value) if err != nil { @@ -400,10 +400,10 @@ func Monitor() { if IsLeader() { athenticationToken := util.NewToken().Short() healthRequestAuthenticationTokenCache.Set(athenticationToken, true, cache.DefaultExpiration) - go PublishCommand("request-health-report", athenticationToken) + go func() { _, _ = PublishCommand("request-health-report", athenticationToken) }() } case err := <-fatalRaftErrorChan: - log.Fatale(err) + _ = log.Fatale(err) } } } diff --git a/go/raft/rel_store.go b/go/raft/rel_store.go index caee19cf..b9c25ee3 100644 --- a/go/raft/rel_store.go +++ b/go/raft/rel_store.go @@ -107,17 +107,17 @@ func (relStore *RelationalStore) Set(key []byte, val []byte) error { } _, err = stmt.Exec(key) if err != nil { - tx.Rollback() + _ = tx.Rollback() return err } stmt, err = tx.Prepare(`insert into raft_store (store_key, store_value) values (?, ?)`) 
if err != nil { - tx.Rollback() + _ = tx.Rollback() return err } _, err = stmt.Exec(key, val) if err != nil { - tx.Rollback() + _ = tx.Rollback() return err } err = tx.Commit() @@ -219,7 +219,7 @@ func (relStore *RelationalStore) StoreLogs(logs []*raft.Log) error { for _, raftLog := range logs { _, err = stmt.Exec(raftLog.Index, raftLog.Term, int(raftLog.Type), raftLog.Data) if err != nil { - tx.Rollback() + _ = tx.Rollback() return err } } diff --git a/go/ssl/ssl.go b/go/ssl/ssl.go index abbcce34..41a5f3f8 100644 --- a/go/ssl/ssl.go +++ b/go/ssl/ssl.go @@ -6,7 +6,7 @@ import ( "encoding/pem" "errors" "fmt" - "io/ioutil" + "os" nethttp "net/http" "strings" @@ -34,7 +34,6 @@ func NewTLSConfig(caFile string, verifyCert bool) (*tls.Config, error) { c.MinVersion = tls.VersionTLS12 // "If CipherSuites is nil, a default list of secure cipher suites is used" c.CipherSuites = nil - c.PreferServerCipherSuites = true if verifyCert { log.Info("verifyCert requested, client certificates will be verified") @@ -53,13 +52,13 @@ func NewTLSConfig(caFile string, verifyCert bool) (*tls.Config, error) { func ReadCAFile(caFile string) (*x509.CertPool, error) { var caCertPool *x509.CertPool if caFile != "" { - data, err := ioutil.ReadFile(caFile) + data, err := os.ReadFile(caFile) if err != nil { return nil, err } caCertPool = x509.NewCertPool() if !caCertPool.AppendCertsFromPEM(data) { - return nil, errors.New("No certificates parsed") + return nil, errors.New("no certificates parsed") } log.Info("Read in CA file:", caFile) } @@ -73,7 +72,7 @@ func Verify(r *nethttp.Request, validOUs []string) error { return nil } if r.TLS == nil { - return errors.New("No TLS") + return errors.New("no TLS") } for _, chain := range r.TLS.VerifiedChains { s := chain[0].Subject.OrganizationalUnit @@ -86,8 +85,8 @@ func Verify(r *nethttp.Request, validOUs []string) error { } } } - log.Error("No valid OUs found") - return errors.New("Invalid OU") + _ = log.Error("No valid OUs found") + return 
errors.New("invalid OU") } // VerifyOUsMiddleware returns an http.Handler middleware that verifies client @@ -139,7 +138,7 @@ func AppendKeyPairWithPassword(tlsConfig *tls.Config, certFile string, keyFile s // Read a PEM file and ask for a password to decrypt it if needed func ReadPEMData(pemFile string, pemPass []byte) ([]byte, error) { - pemData, err := ioutil.ReadFile(pemFile) + pemData, err := os.ReadFile(pemFile) if err != nil { return pemData, err } @@ -147,13 +146,16 @@ func ReadPEMData(pemFile string, pemPass []byte) ([]byte, error) { // We should really just get the pem.Block back here, if there's other // junk on the end, warn about it. pemBlock, rest := pem.Decode(pemData) + if pemBlock == nil { + return nil, fmt.Errorf("failed to decode PEM data from %s", pemFile) + } if len(rest) > 0 { - log.Warning("Didn't parse all of", pemFile) + _ = log.Warning("Didn't parse all of", pemFile) } - if x509.IsEncryptedPEMBlock(pemBlock) { + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // SA1019 no replacement available // Decrypt and get the ASN.1 DER bytes here - pemData, err = x509.DecryptPEMBlock(pemBlock, pemPass) + pemData, err = x509.DecryptPEMBlock(pemBlock, pemPass) //nolint:staticcheck // SA1019 no replacement available if err != nil { return pemData, err } else { @@ -183,15 +185,18 @@ func GetPEMPassword(pemFile string) []byte { // Determine if PEM file is encrypted func IsEncryptedPEM(pemFile string) bool { - pemData, err := ioutil.ReadFile(pemFile) + pemData, err := os.ReadFile(pemFile) if err != nil { return false } pemBlock, _ := pem.Decode(pemData) + if pemBlock == nil { + return false + } if len(pemBlock.Bytes) == 0 { return false } - return x509.IsEncryptedPEMBlock(pemBlock) + return x509.IsEncryptedPEMBlock(pemBlock) //nolint:staticcheck // SA1019 no replacement available } // ListenAndServeTLS acts identically to http.ListenAndServeTLS, except that it diff --git a/go/ssl/ssl_test.go b/go/ssl/ssl_test.go index bb233474..d81a94f1 
100644 --- a/go/ssl/ssl_test.go +++ b/go/ssl/ssl_test.go @@ -5,7 +5,7 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" + "os" nethttp "net/http" "reflect" "strings" @@ -33,7 +33,7 @@ func TestHasString(t *testing.T) { // TODO: Build a fake CA and make sure it loads up func TestNewTLSConfig(t *testing.T) { fakeCA := writeFakeFile(pemCertificate) - defer syscall.Unlink(fakeCA) + defer func() { _ = syscall.Unlink(fakeCA) }() conf, err := ssl.NewTLSConfig(fakeCA, true) if err != nil { @@ -118,11 +118,11 @@ func TestVerify(t *testing.T) { func TestReadPEMData(t *testing.T) { pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) + defer func() { _ = syscall.Unlink(pemCertFile) }() pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) + defer func() { _ = syscall.Unlink(pemPKFile) }() pemPKWPFile := writeFakeFile(pemPrivateKeyWithPass) - defer syscall.Unlink(pemPKWPFile) + defer func() { _ = syscall.Unlink(pemPKWPFile) }() _, err := ssl.ReadPEMData(pemCertFile, []byte{}) if err != nil { t.Errorf("Failed to decode certificate: %s", err) @@ -146,9 +146,9 @@ func TestAppendKeyPair(t *testing.T) { t.Fatal(err) } pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) + defer func() { _ = syscall.Unlink(pemCertFile) }() pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) + defer func() { _ = syscall.Unlink(pemPKFile) }() if err := ssl.AppendKeyPair(c, pemCertFile, pemPKFile); err != nil { t.Errorf("Failed to append certificate and key to tls config: %s", err) @@ -161,9 +161,9 @@ func TestAppendKeyPairWithPassword(t *testing.T) { t.Fatal(err) } pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) + defer func() { _ = syscall.Unlink(pemCertFile) }() pemPKFile := writeFakeFile(pemPrivateKeyWithPass) - defer syscall.Unlink(pemPKFile) + defer func() { _ = syscall.Unlink(pemPKFile) }() if err := ssl.AppendKeyPairWithPassword(c, pemCertFile, 
pemPKFile, []byte("testing")); err != nil { t.Errorf("Failed to append certificate and key to tls config: %s", err) @@ -172,9 +172,9 @@ func TestAppendKeyPairWithPassword(t *testing.T) { func TestIsEncryptedPEM(t *testing.T) { pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) + defer func() { _ = syscall.Unlink(pemPKFile) }() pemPKWPFile := writeFakeFile(pemPrivateKeyWithPass) - defer syscall.Unlink(pemPKWPFile) + defer func() { _ = syscall.Unlink(pemPKWPFile) }() if ssl.IsEncryptedPEM(pemPKFile) { t.Errorf("Incorrectly identified unencrypted PEM as encrypted") } @@ -184,11 +184,11 @@ func TestIsEncryptedPEM(t *testing.T) { } func writeFakeFile(content string) string { - f, err := ioutil.TempFile("", "ssl_test") + f, err := os.CreateTemp("", "ssl_test") if err != nil { return "" } - ioutil.WriteFile(f.Name(), []byte(content), 0644) + _ = os.WriteFile(f.Name(), []byte(content), 0644) return f.Name() } diff --git a/go/util/token.go b/go/util/token.go index f31f8567..7a0a88e1 100644 --- a/go/util/token.go +++ b/go/util/token.go @@ -50,11 +50,11 @@ type Token struct { Hash string } -func (this *Token) Short() string { - if len(this.Hash) <= shortTokenLength { - return this.Hash +func (t *Token) Short() string { + if len(t.Hash) <= shortTokenLength { + return t.Hash } - return this.Hash[0:shortTokenLength] + return t.Hash[0:shortTokenLength] } var ProcessToken *Token = NewToken()