This repository has been archived by the owner on Dec 10, 2021. It is now read-only.

Change node info structure #94

Merged
merged 1 commit on Aug 1, 2019
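
In short: the node id moves into the management.Node message itself. ClusterWatchResponse and ClusterJoinRequest drop their separate Id fields, manager.NewServer and GRPCClient.ClusterJoin lose their id parameters, and the Raft FSM's renamed GetNode/SetNode/DeleteNode key the cluster map by node.Id.
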
cmd/blast/manager_cluster_watch.go (1 change: 0 additions & 1 deletion)

@@ -46,7 +46,6 @@ func managerClusterWatch(c *cli.Context) error {
 	}
 	resp := &management.ClusterWatchResponse{
 		Event:   0,
-		Id:      "",
 		Node:    nil,
 		Cluster: cluster,
 	}
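
Since ClusterWatchResponse no longer carries a separate Id, watchers read the identity from the embedded Node. A minimal consuming sketch; the generated stream type management.Management_ClusterWatchClient is assumed, it does not appear in this diff:

// Sketch: the node id now arrives inside resp.Node, not as resp.Id.
func watchCluster(stream management.Management_ClusterWatchClient) error {
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // io.EOF when the server closes the stream
		}
		if resp.Node != nil {
			log.Printf("event=%v node=%s", resp.Event, resp.Node.Id)
		}
	}
}
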
cmd/blast/manager_start.go (4 changes: 3 additions & 1 deletion)

@@ -93,7 +93,9 @@ func managerStart(c *cli.Context) error {
 	)
 
 	node := &management.Node{
+		Id:          nodeId,
 		BindAddress: nodeAddr,
+		State:       management.Node_UNKNOWN,
 		Metadata: &management.Metadata{
 			GrpcAddress: grpcAddr,
 			HttpAddress: httpAddr,
@@ -120,7 +122,7 @@ func managerStart(c *cli.Context) error {
 		IndexStorageType: indexStorageType,
 	}
 
-	svr, err := manager.NewServer(peerGrpcAddr, nodeId, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger)
+	svr, err := manager.NewServer(peerGrpcAddr, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpLogger)
 	if err != nil {
 		return err
 	}
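
The id now rides inside the Node message, so the server constructor takes one argument fewer. Condensed from the diff above (logger wiring elided):

node := &management.Node{
	Id:          nodeId,
	BindAddress: nodeAddr,
	State:       management.Node_UNKNOWN,
	Metadata: &management.Metadata{
		GrpcAddress: grpcAddr,
		HttpAddress: httpAddr,
	},
}

// nodeId no longer needs to be passed alongside node; the server
// reads the identity from node.Id.
svr, err := manager.NewServer(peerGrpcAddr, node, dataDir,
	raftStorageType, indexConfig, logger, grpcLogger, httpLogger)
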
dispatcher/server_test.go (12 changes: 9 additions & 3 deletions)

@@ -48,6 +48,7 @@ func TestServer_Start(t *testing.T) {
 	managerRaftStorageType1 := "boltdb"
 
 	managerNode1 := &management.Node{
+		Id:          managerNodeId1,
 		BindAddress: managerBindAddress1,
 		State:       management.Node_UNKNOWN,
 		Metadata: &management.Metadata{
@@ -62,7 +63,7 @@ func TestServer_Start(t *testing.T) {
 	}
 
 	// create server
-	managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNodeId1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexConfig1, logger, grpcLogger, httpAccessLogger)
+	managerServer1, err := manager.NewServer(managerPeerGrpcAddress1, managerNode1, managerDataDir1, managerRaftStorageType1, managerIndexConfig1, logger, grpcLogger, httpAccessLogger)
 	defer func() {
 		if managerServer1 != nil {
 			managerServer1.Stop()
@@ -84,6 +85,7 @@ func TestServer_Start(t *testing.T) {
 	managerRaftStorageType2 := "boltdb"
 
 	managerNode2 := &management.Node{
+		Id:          managerNodeId2,
 		BindAddress: managerBindAddress2,
 		State:       management.Node_UNKNOWN,
 		Metadata: &management.Metadata{
@@ -98,7 +100,7 @@ func TestServer_Start(t *testing.T) {
 	}
 
 	// create server
-	managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNodeId2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexConfig2, logger, grpcLogger, httpAccessLogger)
+	managerServer2, err := manager.NewServer(managerPeerGrpcAddress2, managerNode2, managerDataDir2, managerRaftStorageType2, managerIndexConfig2, logger, grpcLogger, httpAccessLogger)
 	defer func() {
 		if managerServer2 != nil {
 			managerServer2.Stop()
@@ -120,6 +122,7 @@ func TestServer_Start(t *testing.T) {
 	managerRaftStorageType3 := "boltdb"
 
 	managerNode3 := &management.Node{
+		Id:          managerNodeId3,
 		BindAddress: managerBindAddress3,
 		State:       management.Node_UNKNOWN,
 		Metadata: &management.Metadata{
@@ -134,7 +137,7 @@ func TestServer_Start(t *testing.T) {
 	}
 
 	// create server
-	managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNodeId3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexConfig3, logger, grpcLogger, httpAccessLogger)
+	managerServer3, err := manager.NewServer(managerPeerGrpcAddress3, managerNode3, managerDataDir3, managerRaftStorageType3, managerIndexConfig3, logger, grpcLogger, httpAccessLogger)
 	defer func() {
 		if managerServer3 != nil {
 			managerServer3.Stop()
@@ -166,6 +169,7 @@ func TestServer_Start(t *testing.T) {
 	expManagerCluster1 := &management.Cluster{
 		Nodes: map[string]*management.Node{
 			managerNodeId1: {
+				Id:          managerNodeId1,
 				BindAddress: managerBindAddress1,
 				State:       management.Node_LEADER,
 				Metadata: &management.Metadata{
@@ -174,6 +178,7 @@ func TestServer_Start(t *testing.T) {
 				},
 			},
 			managerNodeId2: {
+				Id:          managerNodeId2,
 				BindAddress: managerBindAddress2,
 				State:       management.Node_FOLLOWER,
 				Metadata: &management.Metadata{
@@ -182,6 +187,7 @@ func TestServer_Start(t *testing.T) {
 				},
 			},
 			managerNodeId3: {
+				Id:          managerNodeId3,
 				BindAddress: managerBindAddress3,
 				State:       management.Node_FOLLOWER,
 				Metadata: &management.Metadata{
manager/grpc_client.go (3 changes: 1 addition & 2 deletions)

@@ -120,9 +120,8 @@ func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*management.Node, error)
 	return resp.Node, nil
 }
 
-func (c *GRPCClient) ClusterJoin(id string, node *management.Node, opts ...grpc.CallOption) error {
+func (c *GRPCClient) ClusterJoin(node *management.Node, opts ...grpc.CallOption) error {
 	req := &management.ClusterJoinRequest{
-		Id:   id,
 		Node: node,
 	}
 
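
On the client side the id is now taken from node.Id when building ClusterJoinRequest. A usage sketch; NewGRPCClient and Close are assumed from the surrounding package, only the ClusterJoin signature is from this diff:

// joinCluster dials the leader and registers the given node (sketch).
func joinCluster(leaderGrpcAddress string, node *management.Node) error {
	client, err := manager.NewGRPCClient(leaderGrpcAddress) // assumed constructor
	if err != nil {
		return err
	}
	defer client.Close()

	// The joining node carries its own id; no separate id argument.
	return client.ClusterJoin(node)
}

A caller would pass something like &management.Node{Id: "manager2", BindAddress: ":2110", State: management.Node_UNKNOWN} (illustrative values).
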
manager/grpc_service.go (11 changes: 4 additions & 7 deletions)

@@ -214,7 +214,6 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) {
 				// notify the cluster changes
 				clusterResp := &management.ClusterWatchResponse{
 					Event:   management.ClusterWatchResponse_UPDATE,
-					Id:      id,
 					Node:    node,
 					Cluster: snapshotCluster,
 				}
@@ -227,7 +226,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) {
 				// notify the cluster changes
 				clusterResp := &management.ClusterWatchResponse{
 					Event:   management.ClusterWatchResponse_JOIN,
-					Id:      id,
 					Node:    node,
 					Cluster: snapshotCluster,
 				}
@@ -244,7 +242,7 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) {
 				// notify the cluster changes
 				clusterResp := &management.ClusterWatchResponse{
 					Event:   management.ClusterWatchResponse_LEAVE,
-					Id:      id,
 					Node:    node,
 					Cluster: snapshotCluster,
 				}
@@ -364,9 +361,9 @@ func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*manageme
 	}, nil
 }
 
-func (s *GRPCService) setNode(id string, node *management.Node) error {
+func (s *GRPCService) setNode(node *management.Node) error {
 	if s.raftServer.IsLeader() {
-		err := s.raftServer.SetNode(id, node)
+		err := s.raftServer.SetNode(node)
 		if err != nil {
 			s.logger.Error(err.Error())
 			return err
@@ -378,7 +375,7 @@ func (s *GRPCService) setNode(id string, node *management.Node) error {
 			s.logger.Error(err.Error())
 			return err
 		}
-		err = client.ClusterJoin(id, node)
+		err = client.ClusterJoin(node)
 		if err != nil {
 			s.logger.Error(err.Error())
 			return err
@@ -391,7 +388,7 @@ func (s *GRPCService) setNode(id string, node *management.Node) error {
 func (s *GRPCService) ClusterJoin(ctx context.Context, req *management.ClusterJoinRequest) (*empty.Empty, error) {
 	resp := &empty.Empty{}
 
-	err := s.setNode(req.Id, req.Node)
+	err := s.setNode(req.Node)
 	if err != nil {
 		s.logger.Error(err.Error())
 		return resp, status.Error(codes.Internal, err.Error())
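
The forwarding path in setNode keeps its shape: a non-leader hands the whole Node to the leader over gRPC, and the leader applies it through Raft. Condensed control flow; the leader-client lookup helper sits in the elided part of the diff, so its name here is assumed:

func (s *GRPCService) setNode(node *management.Node) error {
	if s.raftServer.IsLeader() {
		// leader: apply the change through the Raft log
		return s.raftServer.SetNode(node)
	}
	// follower: forward the join to the current leader
	client, err := s.getLeaderClient() // assumed helper, elided in the diff
	if err != nil {
		return err
	}
	return client.ClusterJoin(node)
}
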
manager/raft_fsm.go (12 changes: 6 additions & 6 deletions)

@@ -60,7 +60,7 @@ func (f *RaftFSM) Stop() error {
 	return nil
 }
 
-func (f *RaftFSM) GetNodeConfig(nodeId string) (*management.Node, error) {
+func (f *RaftFSM) GetNode(nodeId string) (*management.Node, error) {
 	f.clusterMutex.RLock()
 	defer f.clusterMutex.RUnlock()
 
@@ -72,16 +72,16 @@ func (f *RaftFSM) GetNodeConfig(nodeId string) (*management.Node, error) {
 	return node, nil
 }
 
-func (f *RaftFSM) SetNodeConfig(nodeId string, node *management.Node) error {
+func (f *RaftFSM) SetNode(node *management.Node) error {
 	f.clusterMutex.RLock()
 	defer f.clusterMutex.RUnlock()
 
-	f.cluster.Nodes[nodeId] = node
+	f.cluster.Nodes[node.Id] = node
 
 	return nil
 }
 
-func (f *RaftFSM) DeleteNodeConfig(nodeId string) error {
+func (f *RaftFSM) DeleteNode(nodeId string) error {
 	f.clusterMutex.RLock()
 	defer f.clusterMutex.RUnlock()
 
@@ -183,7 +183,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} {
 			f.logger.Error(err.Error())
 			return &fsmResponse{error: err}
 		}
-		err = f.SetNodeConfig(data["node_id"].(string), node)
+		err = f.SetNode(node)
 		if err != nil {
 			f.logger.Error(err.Error())
 			return &fsmResponse{error: err}
@@ -196,7 +196,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} {
 			f.logger.Error(err.Error())
 			return &fsmResponse{error: err}
 		}
-		err = f.DeleteNodeConfig(data["node_id"].(string))
+		err = f.DeleteNode(data["id"].(string))
 		return &fsmResponse{error: err}
 	case setKeyValue:
 		var data map[string]interface{}
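
With SetNode keying the map by node.Id, the FSM can no longer be handed a key that disagrees with the node's own identity. A minimal sketch of the resulting invariant (types from this PR, literal values illustrative):

// Sketch of the invariant SetNode now maintains.
func exampleSetNode() {
	cluster := &management.Cluster{Nodes: map[string]*management.Node{}}

	node := &management.Node{Id: "manager1", BindAddress: ":2100"}
	cluster.Nodes[node.Id] = node // what SetNode does after this change

	// Lookup by the node's own id is consistent by construction.
	fmt.Println(cluster.Nodes["manager1"].BindAddress) // ":2100"
}

Note that the delete payload key also changes from "node_id" to "id", so whatever produces those Raft log entries is presumably updated elsewhere in this commit; the Set/Delete paths still guard the map with the read lock, which this PR leaves unchanged.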