From 5d7ecf8b3896575d33b108a7dab7fd038fa155bf Mon Sep 17 00:00:00 2001
From: apple
Date: Fri, 13 Jan 2023 15:34:17 +0100
Subject: [PATCH] Fixing merge issue ProxySQL add reader node if not in reader group

---
 cluster/cluster_chk.go  |  6 +-----
 cluster/cluster_topo.go | 30 ++----------------------------
 cluster/prx_proxysql.go |  6 +++++-
 cluster/srv_rejoin.go   | 34 +++-------------------------------
 cluster/srv_set.go      |  5 +----
 5 files changed, 12 insertions(+), 69 deletions(-)

diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go
index 81ef99bc6..ffb0262b2 100644
--- a/cluster/cluster_chk.go
+++ b/cluster/cluster_chk.go
@@ -260,11 +260,7 @@ func (cluster *Cluster) isFoundCandidateMaster() bool {
 		return true
 	}
 	key := -1
-	if cluster.Conf.MultiMasterGrouprep {
-		key = cluster.electSwitchoverGroupReplicationCandidate(cluster.slaves, true)
-	} else {
-		key = cluster.electFailoverCandidate(cluster.slaves, false)
-	}
+	key = cluster.electFailoverCandidate(cluster.slaves, false)
 	if key == -1 {
 		cluster.sme.AddState("ERR00032", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00032"]), ErrFrom: "CHECK"})
 		return false
diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go
index 7faadb97c..1f07d3588 100644
--- a/cluster/cluster_topo.go
+++ b/cluster/cluster_topo.go
@@ -170,23 +170,9 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
 				cluster.LogPrintf(LvlDbg, "Server %s is configured as a slave", sv.URL)
 			}
 			cluster.slaves = append(cluster.slaves, sv)
-<<<<<<< HEAD
 		} else { // not slave
 			if sv.BinlogDumpThreads == 0 && sv.State != stateMaster {
-=======
-		} else { // not slave
-			if sv.IsGroupReplicationMaster {
-				cluster.master = cluster.Servers[k]
-				cluster.vmaster = cluster.Servers[k]
-				cluster.master.SetMaster()
-				if cluster.master.IsReadOnly() {
-					cluster.master.SetReadWrite()
-					cluster.LogPrintf(LvlInfo, "Group replication server %s disable read only ", cluster.master.URL)
-				}
-			} else if sv.BinlogDumpThreads == 0 && sv.State != stateMaster {
->>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
 				//sv.State = stateUnconn
 				//transition to standalone may happen despite server have never connect successfully when default to suspect
 				if cluster.Conf.LogLevel > 2 {
@@ -201,15 +187,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
 				cluster.SetState("ERR00063", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00063"]), ErrFrom: "TOPO"})
 				// cluster.Servers[k].RejoinMaster() /* remove for rolling restart , wrongly rejoin server as master before just after swithover while the server is just stopping */
 			} else {
-<<<<<<< HEAD
-=======
-				if cluster.Conf.LogLevel > 2 {
-					cluster.LogPrintf(LvlDbg, "Server %s was set master as last non slave", sv.URL)
-				}
-				if len(cluster.Servers) == 1 {
-					cluster.Conf.ActivePassive = true
-				}
->>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
+
 				cluster.master = cluster.Servers[k]
 				cluster.master.SetMaster()
 				if cluster.master.IsReadOnly() && !cluster.master.IsRelay {
@@ -223,11 +201,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
 	} //end loop all servers

 	// If no cluster.slaves are detected, generate an error
-<<<<<<< HEAD
-	if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep {
-=======
-	if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoMultiMasterGrouprep && cluster.GetTopology() != topoActivePassive {
->>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
+	if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoActivePassive {
 		cluster.SetState("ERR00010", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00010"]), ErrFrom: "TOPO"})
 	}

diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go
index 137dceffb..f38b844c0 100644
--- a/cluster/prx_proxysql.go
+++ b/cluster/prx_proxysql.go
@@ -401,7 +401,9 @@ func (proxy *ProxySQLProxy) Refresh() error {
 				updated = true
 			}
 		} else if s.IsSlave && !s.IsIgnored() && (s.PrevState == stateUnconn || s.PrevState == stateFailed) {
+
 			err = psql.SetReader(misc.Unbracket(s.Host), s.Port)
+
 			if cluster.Conf.ProxysqlDebug {
 				cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting reader standalone server %s", s.URL)
 			}
@@ -409,8 +411,10 @@
 				cluster.sme.AddState("ERR00072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00072"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name})
 			}
 			updated = true
+		} else if s.IsSlave && !isFoundBackendRead && !s.IsIgnored() {
+			err = psql.AddServerAsReader(misc.Unbracket(s.Host), s.Port, "1", strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxReplicationLag), strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxConnections), strconv.Itoa(misc.Bool2Int(s.ClusterGroup.Conf.PRXServersBackendCompression)), proxy.UseSSL())
+			updated = true
 		}
-		}
 	} //if bootstrap

 	// load the grants
diff --git a/cluster/srv_rejoin.go b/cluster/srv_rejoin.go
index 3969fd33b..04d8f6c7c 100644
--- a/cluster/srv_rejoin.go
+++ b/cluster/srv_rejoin.go
@@ -56,6 +56,7 @@ func (server *ServerMonitor) RejoinMaster() error {
 		if server.URL != server.ClusterGroup.master.URL {
 			server.ClusterGroup.SetState("WARN0022", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0022"], server.URL, server.ClusterGroup.master.URL), ErrFrom: "REJOIN"})
 			server.RejoinScript()
+
 			if server.ClusterGroup.Conf.FailoverSemiSyncState {
 				server.ClusterGroup.LogPrintf("INFO", "Set semisync replica and disable semisync leader %s", server.URL)
 				logs, err := server.SetSemiSyncReplica()
@@ -90,37 +91,10 @@ func (server *ServerMonitor) RejoinMaster() error {
 					server.ClusterGroup.LogPrintf("ERROR", "State transfer rejoin failed")
 				}
 			}
-<<<<<<< HEAD
 			if server.ClusterGroup.Conf.AutorejoinBackupBinlog == true {
 				server.saveBinlog(crash)
 			}
-		}
-	} else {
-		//no master discovered
-		if server.ClusterGroup.lastmaster != nil {
-			if server.ClusterGroup.lastmaster.ServerID == server.ServerID {
-				server.ClusterGroup.LogPrintf("INFO", "Rediscovering last seen master: %s", server.URL)
-				server.ClusterGroup.master = server
-				server.ClusterGroup.lastmaster = nil
-			} else {
-				if server.ClusterGroup.Conf.FailRestartUnsafe == false {
server.ClusterGroup.LogPrintf("INFO", "Rediscovering last seen master: %s", server.URL) - - server.rejoinMasterAsSlave() - - } - } - } else { - if server.ClusterGroup.Conf.FailRestartUnsafe == true { - server.ClusterGroup.LogPrintf("INFO", "Restart Unsafe Picking first non-slave as master: %s", server.URL) - server.ClusterGroup.master = server - } - } - // if consul or internal proxy need to adapt read only route to new slaves - server.ClusterGroup.backendStateChangeProxies() -======= - // if consul or internal proxy need to adapt read only route to new slaves server.ClusterGroup.backendStateChangeProxies() } @@ -137,17 +111,15 @@ func (server *ServerMonitor) RejoinMaster() error { if server.ClusterGroup.Conf.FailRestartUnsafe == false { server.ClusterGroup.LogPrintf("INFO", "Rediscovering not the master from last seen master: %s", server.URL) server.rejoinMasterAsSlave() - // if consul or internal proxy need to adapt read only route to new slaves - server.ClusterGroup.backendStateChangeProxies() } else { server.ClusterGroup.LogPrintf("INFO", "Rediscovering unsafe possibly electing old leader after cascading failure to flavor availability: %s", server.URL) server.ClusterGroup.master = server } } - + // if consul or internal proxy need to adapt read only route to new slaves + server.ClusterGroup.backendStateChangeProxies() } // we have last seen master ->>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464 } return nil } diff --git a/cluster/srv_set.go b/cluster/srv_set.go index e4cf178e5..7803fc186 100644 --- a/cluster/srv_set.go +++ b/cluster/srv_set.go @@ -57,14 +57,11 @@ func (server *ServerMonitor) SetState(state string) { } func (server *ServerMonitor) SetPrevState(state string) { -<<<<<<< HEAD - server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state changed to: %s", server.URL, state) -======= + if state == "" { return } server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state set to: %s", server.URL, state) ->>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464 server.PrevState = state }