*: improve some logs #1529

Merged · merged 1 commit on May 6, 2019
server/cluster.go (8 changes: 6 additions & 2 deletions)
@@ -129,10 +129,13 @@ func (c *RaftCluster) start() error {
 func (c *RaftCluster) runCoordinator() {
     defer logutil.LogPanic()
     defer c.wg.Done()
-    defer c.coordinator.wg.Wait()
+    defer func() {
+        c.coordinator.wg.Wait()
+        log.Info("coordinator has been stopped")
+    }()
     c.coordinator.run()
     <-c.coordinator.ctx.Done()
-    log.Info("coordinator: Stopped coordinator")
+    log.Info("coordinator is stopping")
 }

 func (c *RaftCluster) stop() {
@@ -550,6 +553,7 @@ func (c *RaftCluster) runBackgroundJobs(interval time.Duration) {
     for {
         select {
         case <-c.quit:
+            log.Info("background jobs has been stopped")
             return
         case <-ticker.C:
             c.checkOperators()
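The cluster.go change moves the wait-then-log into a single deferred closure, so the "stopped" message is emitted only after the coordinator's goroutines have actually exited, while the "stopping" message fires as soon as the context is canceled. A minimal, self-contained sketch of that ordering; the `worker` type and the free `runCoordinator` function below are illustrative stand-ins, not PD's types:

```go
package main

import (
	"context"
	"log"
	"sync"
	"time"
)

// worker is a stand-in for the coordinator: it owns goroutines tracked by a WaitGroup.
type worker struct {
	ctx context.Context
	wg  sync.WaitGroup
}

func (w *worker) run() {
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		<-w.ctx.Done() // pretend to do work until canceled
	}()
}

func runCoordinator(ctx context.Context, w *worker) {
	defer func() {
		w.wg.Wait() // only claim "stopped" once every worker goroutine has exited
		log.Println("coordinator has been stopped")
	}()
	w.run()
	<-ctx.Done()
	log.Println("coordinator is stopping")
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	w := &worker{ctx: ctx}
	done := make(chan struct{})
	go func() {
		runCoordinator(ctx, w)
		close(done)
	}()
	time.Sleep(50 * time.Millisecond)
	cancel() // triggers "is stopping" first, then "has been stopped"
	<-done
}
```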
server/coordinator.go (12 changes: 7 additions & 5 deletions)
@@ -123,14 +123,15 @@ func (c *coordinator) patrolRegions() {
     timer := time.NewTimer(c.cluster.GetPatrolRegionInterval())
     defer timer.Stop()

-    log.Info("coordinator: start patrol regions")
+    log.Info("coordinator starts patrol regions")
     start := time.Now()
     var key []byte
     for {
         select {
         case <-timer.C:
             timer.Reset(c.cluster.GetPatrolRegionInterval())
         case <-c.ctx.Done():
+            log.Info("patrol regions has been stopped")
             return
         }

@@ -210,19 +211,20 @@ func (c *coordinator) checkRegion(region *core.RegionInfo) bool {
 func (c *coordinator) run() {
     ticker := time.NewTicker(runSchedulerCheckInterval)
     defer ticker.Stop()
-    log.Info("coordinator: Start collect cluster information")
+    log.Info("coordinator starts to collect cluster information")
     for {
         if c.shouldRun() {
-            log.Info("coordinator: Cluster information is prepared")
+            log.Info("coordinator has finished cluster information preparation")
             break
         }
         select {
         case <-ticker.C:
         case <-c.ctx.Done():
+            log.Info("coordinator stops running")
             return
         }
     }
-    log.Info("coordinator: Run scheduler")
+    log.Info("coordinator starts to run schedulers")

     k := 0
     scheduleCfg := c.cluster.opt.load()
@@ -437,7 +439,7 @@ func (c *coordinator) runScheduler(s *scheduleController) {
             }

         case <-s.Ctx().Done():
-            log.Infof("%v stopped: %v", s.GetName(), s.Ctx().Err())
+            log.Infof("%v has been stopped: %v", s.GetName(), s.Ctx().Err())
             return
         }
     }
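In coordinator.go the reworded messages mark three distinct states of the run loop: collecting cluster information, finishing preparation, and being canceled before preparation completes. A short sketch of that poll-until-ready loop using the same logging scheme; `waitUntilPrepared`, `isPrepared`, and the intervals are made up for illustration:

```go
package main

import (
	"context"
	"log"
	"time"
)

// waitUntilPrepared polls isPrepared on every tick and returns true once it
// holds, or false if the context is canceled first.
func waitUntilPrepared(ctx context.Context, isPrepared func() bool, interval time.Duration) bool {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	log.Println("coordinator starts to collect cluster information")
	for {
		if isPrepared() {
			log.Println("coordinator has finished cluster information preparation")
			return true
		}
		select {
		case <-ticker.C: // poll again on the next tick
		case <-ctx.Done():
			log.Println("coordinator stops running")
			return false
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	ready := time.Now().Add(200 * time.Millisecond)
	ok := waitUntilPrepared(ctx, func() bool { return time.Now().After(ready) }, 50*time.Millisecond)
	log.Println("prepared:", ok)
}
```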
server/leader.go (12 changes: 8 additions & 4 deletions)
@@ -197,10 +197,13 @@ func (s *Server) memberInfo() (member *pdpb.Member, marshalStr string) {
 }

 func (s *Server) campaignLeader() error {
-    log.Debugf("begin to campaign leader %s", s.Name())
+    log.Infof("start to campaign leader %s", s.Name())

     lessor := clientv3.NewLease(s.client)
-    defer lessor.Close()
+    defer func() {
+        lessor.Close()
+        log.Info("exit campaign leader")
+    }()

     start := time.Now()
     ctx, cancel := context.WithTimeout(s.client.Ctx(), requestTimeout)
@@ -225,7 +228,7 @@ func (s *Server) campaignLeader() error {
         return errors.WithStack(err)
     }
     if !resp.Succeeded {
-        return errors.New("campaign leader failed, other server may campaign ok")
+        return errors.New("failed to campaign leader, other server may campaign ok")
     }

     // Make the leader keepalived.
@@ -236,7 +239,7 @@ func (s *Server) campaignLeader() error {
     if err != nil {
         return errors.WithStack(err)
     }
-    log.Debugf("campaign leader ok %s", s.Name())
+    log.Infof("campaign leader ok %s", s.Name())

     err = s.scheduleOpt.reload(s.kv)
     if err != nil {
@@ -276,6 +279,7 @@ func (s *Server) campaignLeader() error {
             }
         case <-tsTicker.C:
             if err = s.updateTimestamp(); err != nil {
+                log.Info("failed to update timestamp")
                 return err
             }
             etcdLeader := s.GetEtcdLeader()
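The leader.go changes promote the campaign logs to Info level and pair the lessor cleanup with an "exit campaign leader" message in a deferred closure. For context, a rough sketch of lease-based leader campaign against etcd, in the spirit of campaignLeader but not PD's actual implementation; the clientv3 import path, the localhost endpoint, and the key/TTL values are assumptions:

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3" // client import path assumed for the 2019-era etcd client
)

func campaign(cli *clientv3.Client, key, value string, ttl int64) error {
	log.Printf("start to campaign leader %s", value)
	lessor := clientv3.NewLease(cli)
	defer func() {
		lessor.Close()
		log.Print("exit campaign leader")
	}()

	ctx, cancel := context.WithTimeout(cli.Ctx(), 3*time.Second)
	lease, err := lessor.Grant(ctx, ttl)
	cancel()
	if err != nil {
		return err
	}

	// Compare-and-set: write the leader key only if nobody holds it yet.
	resp, err := cli.Txn(cli.Ctx()).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, value, clientv3.WithLease(lease.ID))).
		Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		return errors.New("failed to campaign leader, other server may campaign ok")
	}
	log.Printf("campaign leader ok %s", value)

	// Keep the leadership lease alive; the loop ends when keepalive responses stop.
	ch, err := lessor.KeepAlive(cli.Ctx(), lease.ID)
	if err != nil {
		return err
	}
	for range ch {
	}
	return nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println(campaign(cli, "/example/pd/leader", "pd-1", 3))
}
```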