
Commit

Fixing merge issue
ProxySQL add reader node if not in reader group
svaroqui committed Jan 13, 2023
1 parent 7d7b623 commit 5d7ecf8
Showing 5 changed files with 12 additions and 69 deletions.
cluster/cluster_chk.go: 1 addition & 5 deletions
@@ -260,11 +260,7 @@ func (cluster *Cluster) isFoundCandidateMaster() bool {
return true
}
key := -1
if cluster.Conf.MultiMasterGrouprep {
key = cluster.electSwitchoverGroupReplicationCandidate(cluster.slaves, true)
} else {
key = cluster.electFailoverCandidate(cluster.slaves, false)
}
key = cluster.electFailoverCandidate(cluster.slaves, false)
if key == -1 {
cluster.sme.AddState("ERR00032", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00032"]), ErrFrom: "CHECK"})
return false
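The resolution drops the MultiMasterGrouprep-specific switchover election, so candidate lookup in isFoundCandidateMaster always goes through electFailoverCandidate. A minimal sketch of the resolved fragment, with indentation restored and nothing added beyond what the hunk shows:

key := -1
key = cluster.electFailoverCandidate(cluster.slaves, false)
if key == -1 {
	// No promotable replica found: raise ERR00032 and report failure.
	cluster.sme.AddState("ERR00032", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00032"]), ErrFrom: "CHECK"})
	return false
}
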
cluster/cluster_topo.go: 2 additions & 28 deletions
@@ -170,23 +170,9 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
cluster.LogPrintf(LvlDbg, "Server %s is configured as a slave", sv.URL)
}
cluster.slaves = append(cluster.slaves, sv)
<<<<<<< HEAD
} else {
// not slave

if sv.BinlogDumpThreads == 0 && sv.State != stateMaster {
=======
} else { // not slave
if sv.IsGroupReplicationMaster {
cluster.master = cluster.Servers[k]
cluster.vmaster = cluster.Servers[k]
cluster.master.SetMaster()
if cluster.master.IsReadOnly() {
cluster.master.SetReadWrite()
cluster.LogPrintf(LvlInfo, "Group replication server %s disable read only ", cluster.master.URL)
}
} else if sv.BinlogDumpThreads == 0 && sv.State != stateMaster {
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
//sv.State = stateUnconn
//transition to standalone may happen despite server have never connect successfully when default to suspect
if cluster.Conf.LogLevel > 2 {
@@ -201,15 +187,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
cluster.SetState("ERR00063", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00063"]), ErrFrom: "TOPO"})
// cluster.Servers[k].RejoinMaster() /* remove for rolling restart , wrongly rejoin server as master before just after swithover while the server is just stopping */
} else {
<<<<<<< HEAD
=======
if cluster.Conf.LogLevel > 2 {
cluster.LogPrintf(LvlDbg, "Server %s was set master as last non slave", sv.URL)
}
if len(cluster.Servers) == 1 {
cluster.Conf.ActivePassive = true
}
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464

cluster.master = cluster.Servers[k]
cluster.master.SetMaster()
if cluster.master.IsReadOnly() && !cluster.master.IsRelay {
@@ -223,11 +201,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error {
} //end loop all servers

// If no cluster.slaves are detected, generate an error
<<<<<<< HEAD
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep {
=======
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoMultiMasterGrouprep && cluster.GetTopology() != topoActivePassive {
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoActivePassive {
cluster.SetState("ERR00010", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00010"]), ErrFrom: "TOPO"})
}

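As far as this diff shows, the resolution removes the IsGroupReplicationMaster promotion path and the single-server ActivePassive shortcut from TopologyDiscover, while the missing-slaves check keeps exempting both the wsrep and active-passive topologies. The resolved check, restated from the hunk with indentation:

// If no cluster.slaves are detected, generate an error
if len(cluster.slaves) == 0 && cluster.GetTopology() != topoMultiMasterWsrep && cluster.GetTopology() != topoActivePassive {
	cluster.SetState("ERR00010", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00010"]), ErrFrom: "TOPO"})
}
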
cluster/prx_proxysql.go: 5 additions & 1 deletion
@@ -401,16 +401,20 @@ func (proxy *ProxySQLProxy) Refresh() error {
updated = true
}
} else if s.IsSlave && !s.IsIgnored() && (s.PrevState == stateUnconn || s.PrevState == stateFailed) {

err = psql.SetReader(misc.Unbracket(s.Host), s.Port)

if cluster.Conf.ProxysqlDebug {
cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting reader standalone server %s", s.URL)
}
if err != nil {
cluster.sme.AddState("ERR00072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00072"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name})
}
updated = true
} else if s.IsSlave && !isFoundBackendRead && !s.IsIgnored() {
err = psql.AddServerAsReader(misc.Unbracket(s.Host), s.Port, "1", strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxReplicationLag), strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxConnections), strconv.Itoa(misc.Bool2Int(s.ClusterGroup.Conf.PRXServersBackendCompression)), proxy.UseSSL())
updated = true
}

} //if bootstrap

// load the grants
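This hunk carries the change named in the commit message: a healthy replica that ProxySQL does not already list as a reader backend is now inserted into the reader hostgroup. A sketch of the resolved branch structure inside (*ProxySQLProxy).Refresh, assembled only from the lines above (the enclosing loop, the psql handle and the isFoundBackendRead flag are assumed from context, not verified against the full file):

} else if s.IsSlave && !s.IsIgnored() && (s.PrevState == stateUnconn || s.PrevState == stateFailed) {
	// Existing path: re-register a replica that was previously unreachable.
	err = psql.SetReader(misc.Unbracket(s.Host), s.Port)
	if cluster.Conf.ProxysqlDebug {
		cluster.LogPrintf(LvlInfo, "Monitor ProxySQL setting reader standalone server %s", s.URL)
	}
	if err != nil {
		cluster.sme.AddState("ERR00072", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00072"], err, s.URL), ErrFrom: "PRX", ServerUrl: proxy.Name})
	}
	updated = true
} else if s.IsSlave && !isFoundBackendRead && !s.IsIgnored() {
	// New in this commit: insert the slave into the reader hostgroup; the "1" literal
	// and the lag/connection/compression limits come straight from the call above.
	err = psql.AddServerAsReader(misc.Unbracket(s.Host), s.Port, "1", strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxReplicationLag), strconv.Itoa(s.ClusterGroup.Conf.PRXServersBackendMaxConnections), strconv.Itoa(misc.Bool2Int(s.ClusterGroup.Conf.PRXServersBackendCompression)), proxy.UseSSL())
	updated = true
}

Note that the new branch does not look at PrevState, so a slave that is simply missing from the reader hostgroup gets re-added on the next refresh cycle.
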
cluster/srv_rejoin.go: 3 additions & 31 deletions
@@ -56,6 +56,7 @@ func (server *ServerMonitor) RejoinMaster() error {
if server.URL != server.ClusterGroup.master.URL {
server.ClusterGroup.SetState("WARN0022", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0022"], server.URL, server.ClusterGroup.master.URL), ErrFrom: "REJOIN"})
server.RejoinScript()

if server.ClusterGroup.Conf.FailoverSemiSyncState {
server.ClusterGroup.LogPrintf("INFO", "Set semisync replica and disable semisync leader %s", server.URL)
logs, err := server.SetSemiSyncReplica()
@@ -90,37 +91,10 @@ func (server *ServerMonitor) RejoinMaster() error {
server.ClusterGroup.LogPrintf("ERROR", "State transfer rejoin failed")
}
}
<<<<<<< HEAD
if server.ClusterGroup.Conf.AutorejoinBackupBinlog == true {
server.saveBinlog(crash)
}

}
} else {
//no master discovered
if server.ClusterGroup.lastmaster != nil {
if server.ClusterGroup.lastmaster.ServerID == server.ServerID {
server.ClusterGroup.LogPrintf("INFO", "Rediscovering last seen master: %s", server.URL)
server.ClusterGroup.master = server
server.ClusterGroup.lastmaster = nil
} else {
if server.ClusterGroup.Conf.FailRestartUnsafe == false {
server.ClusterGroup.LogPrintf("INFO", "Rediscovering last seen master: %s", server.URL)

server.rejoinMasterAsSlave()

}
}
} else {
if server.ClusterGroup.Conf.FailRestartUnsafe == true {
server.ClusterGroup.LogPrintf("INFO", "Restart Unsafe Picking first non-slave as master: %s", server.URL)
server.ClusterGroup.master = server
}
}
// if consul or internal proxy need to adapt read only route to new slaves
server.ClusterGroup.backendStateChangeProxies()
=======

// if consul or internal proxy need to adapt read only route to new slaves
server.ClusterGroup.backendStateChangeProxies()
}
@@ -137,17 +111,15 @@ func (server *ServerMonitor) RejoinMaster() error {
if server.ClusterGroup.Conf.FailRestartUnsafe == false {
server.ClusterGroup.LogPrintf("INFO", "Rediscovering not the master from last seen master: %s", server.URL)
server.rejoinMasterAsSlave()
// if consul or internal proxy need to adapt read only route to new slaves
server.ClusterGroup.backendStateChangeProxies()
} else {
server.ClusterGroup.LogPrintf("INFO", "Rediscovering unsafe possibly electing old leader after cascading failure to flavor availability: %s", server.URL)
server.ClusterGroup.master = server
}
}

// if consul or internal proxy need to adapt read only route to new slaves
server.ClusterGroup.backendStateChangeProxies()
} // we have last seen master

>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
}
return nil
}
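The rejoin resolution keeps the "last seen master" rediscovery logic from the #464 branch and calls backendStateChangeProxies once the decision is made, so consul or internal proxies can adapt their read-only routes. A sketch of the branch kept by the last two hunks (whether the surrounding lastmaster checks survive exactly as on the removed HEAD side is not visible in this diff):

if server.ClusterGroup.Conf.FailRestartUnsafe == false {
	server.ClusterGroup.LogPrintf("INFO", "Rediscovering not the master from last seen master: %s", server.URL)
	server.rejoinMasterAsSlave()
} else {
	server.ClusterGroup.LogPrintf("INFO", "Rediscovering unsafe possibly electing old leader after cascading failure to flavor availability: %s", server.URL)
	server.ClusterGroup.master = server
}
// if consul or internal proxy need to adapt read only route to new slaves
server.ClusterGroup.backendStateChangeProxies()
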
cluster/srv_set.go: 1 addition & 4 deletions
@@ -57,14 +57,11 @@ func (server *ServerMonitor) SetState(state string) {
}

func (server *ServerMonitor) SetPrevState(state string) {
<<<<<<< HEAD
server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state changed to: %s", server.URL, state)
=======

if state == "" {
return
}
server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state set to: %s", server.URL, state)
>>>>>>> bab5a650... 2 nodes cluster scenario can end up with cycling replication on the master #464
server.PrevState = state
}

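After the resolution, SetPrevState keeps the guard introduced on the #464 branch: an empty string is ignored, so the recorded previous state is never cleared by accident, and the log wording becomes "set to". The resolved function, restated from the hunk with indentation (a sketch, not checked against the full file):

func (server *ServerMonitor) SetPrevState(state string) {
	// Ignore empty transitions so PrevState is never wiped with an empty string.
	if state == "" {
		return
	}
	server.ClusterGroup.LogPrintf(LvlInfo, "Server %s previous state set to: %s", server.URL, state)
	server.PrevState = state
}
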
