Skip to content

Commit

Permalink
Merge branch 'master' into record-max
Browse files Browse the repository at this point in the history
  • Loading branch information
ti-chi-bot[bot] authored Mar 21, 2024
2 parents 23db662 + 955d30a commit 6ea61b4
Show file tree
Hide file tree
Showing 5 changed files with 82 additions and 61 deletions.
15 changes: 8 additions & 7 deletions metrics/grafana/pd.json
Original file line number Diff line number Diff line change
Expand Up @@ -1738,7 +1738,7 @@
"tableColumn": "idalloc",
"targets": [
{
"expr": "max(pd_cluster_id{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", type=\"idalloc\"})",
"expr": "pd_cluster_id{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", type=\"idalloc\"}!=0",
"format": "time_series",
"hide": false,
"instant": true,
Expand Down Expand Up @@ -2284,7 +2284,7 @@
"tableColumn": "tso",
"targets": [
{
"expr": "max(pd_cluster_tso{type=\"tso\", dc=\"global\"})",
"expr": "pd_cluster_tso{type=\"tso\", dc=\"global\"}!=0",
"format": "time_series",
"instant": true,
"interval": "",
Expand Down Expand Up @@ -2588,7 +2588,7 @@
"tableColumn": "tso",
"targets": [
{
"expr": "max(pd_cluster_tso{type=\"tso\", dc=\"global\"})",
"expr": "pd_cluster_tso{type=\"tso\", dc=\"global\"}!=0",
"format": "time_series",
"instant": true,
"interval": "",
Expand Down Expand Up @@ -7895,6 +7895,7 @@
"targets": [
{
"expr": "pd_checker_patrol_regions_time{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\"} != 0",
"legendFormat": "{{instance}}",
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
Expand Down Expand Up @@ -8474,14 +8475,14 @@
"refId": "A"
},
{
"expr": "rate(pd_schedule_scatter_operators_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", type=\"fail\"}[1m]*60)",
"expr": "rate(pd_schedule_scatter_operators_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", type=\"fail\"}[1m])*60",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "fail",
"refId": "B"
},
{
"expr": "rate(pd_schedule_scatter_operators_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", type=\"success\"}[1m]*60)",
"expr": "rate(pd_schedule_scatter_operators_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", type=\"success\"}[1m])*60",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "success",
Expand Down Expand Up @@ -9296,15 +9297,15 @@
"steppedLine": false,
"targets": [
{
"expr": "etcd_mvcc_db_total_size_in_bytes{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", job=\"pd\"}",
"expr": "etcd_mvcc_db_total_size_in_bytes{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\".*pd.*\"}",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{instance}}-physically-allocated",
"refId": "A"
},
{
"expr": "etcd_mvcc_db_total_size_in_use_in_bytes{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", job=\"pd\"}",
"expr": "etcd_mvcc_db_total_size_in_use_in_bytes{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\".*pd.*\"}",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
Expand Down
4 changes: 3 additions & 1 deletion pkg/mcs/scheduling/server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"net/http"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"sync"
Expand Down Expand Up @@ -413,7 +414,8 @@ func (s *Server) startServer() (err error) {
// different service modes provided by the same pd-server binary
bs.ServerInfoGauge.WithLabelValues(versioninfo.PDReleaseVersion, versioninfo.PDGitHash).Set(float64(time.Now().Unix()))
bs.ServerMaxProcsGauge.Set(float64(runtime.GOMAXPROCS(0)))
deployPath, err := os.Executable()
execPath, err := os.Executable()
deployPath := filepath.Dir(execPath)
if err != nil {
deployPath = ""
}
Expand Down
4 changes: 3 additions & 1 deletion pkg/mcs/tso/server/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"net/http"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"sync"
Expand Down Expand Up @@ -368,7 +369,8 @@ func (s *Server) startServer() (err error) {
s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(s.Context())
legacySvcRootPath := endpoint.LegacyRootPath(s.clusterID)
tsoSvcRootPath := endpoint.TSOSvcRootPath(s.clusterID)
deployPath, err := os.Executable()
execPath, err := os.Executable()
deployPath := filepath.Dir(execPath)
if err != nil {
deployPath = ""
}
Expand Down
26 changes: 21 additions & 5 deletions server/api/region_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"net/http"
"net/url"
"sort"
"sync"
"testing"
"time"

Expand Down Expand Up @@ -333,13 +334,28 @@ func TestRegionsWithKillRequest(t *testing.T) {
url := fmt.Sprintf("%s%s/api/v1/regions", addr, apiPrefix)
mustBootstrapCluster(re, svr)
regionCount := 100000

// create data
var wg sync.WaitGroup
tasks := make(chan int, regionCount)
for w := 0; w < 16; w++ {
wg.Add(1)
go func() {
defer wg.Done()
for i := range tasks {
r := core.NewTestRegionInfo(uint64(i+2), 1,
[]byte(fmt.Sprintf("%09d", i)),
[]byte(fmt.Sprintf("%09d", i+1)),
core.SetApproximateKeys(10), core.SetApproximateSize(10))
mustRegionHeartbeat(re, svr, r)
}
}()
}
for i := 0; i < regionCount; i++ {
r := core.NewTestRegionInfo(uint64(i+2), 1,
[]byte(fmt.Sprintf("%09d", i)),
[]byte(fmt.Sprintf("%09d", i+1)),
core.SetApproximateKeys(10), core.SetApproximateSize(10))
mustRegionHeartbeat(re, svr, r)
tasks <- i
}
close(tasks)
wg.Wait()

ctx, cancel := context.WithCancel(context.Background())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
Expand Down
94 changes: 47 additions & 47 deletions server/server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ func (suite *leaderServerTestSuite) TearDownSuite() {
}
}

func (suite *leaderServerTestSuite) newTestServersWithCfgs(
func newTestServersWithCfgs(
ctx context.Context,
cfgs []*config.Config,
re *require.Assertions,
Expand Down Expand Up @@ -135,52 +135,6 @@ func (suite *leaderServerTestSuite) newTestServersWithCfgs(
return svrs, cleanup
}

func (suite *leaderServerTestSuite) TestCheckClusterID() {
re := suite.Require()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfgs := NewTestMultiConfig(assertutil.CheckerWithNilAssert(re), 2)
for i, cfg := range cfgs {
cfg.DataDir = fmt.Sprintf("/tmp/test_pd_check_clusterID_%d", i)
// Clean up before testing.
testutil.CleanServer(cfg.DataDir)
}
originInitial := cfgs[0].InitialCluster
for _, cfg := range cfgs {
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, cfg.PeerUrls)
}

cfgA, cfgB := cfgs[0], cfgs[1]
// Start a standalone cluster.
svrsA, cleanA := suite.newTestServersWithCfgs(ctx, []*config.Config{cfgA}, re)
defer cleanA()
// Close it.
for _, svr := range svrsA {
svr.Close()
}

// Start another cluster.
_, cleanB := suite.newTestServersWithCfgs(ctx, []*config.Config{cfgB}, re)
defer cleanB()

// Start previous cluster, expect an error.
cfgA.InitialCluster = originInitial
mockHandler := CreateMockHandler(re, "127.0.0.1")
svr, err := CreateServer(ctx, cfgA, nil, mockHandler)
re.NoError(err)

etcd, err := embed.StartEtcd(svr.etcdCfg)
re.NoError(err)
urlsMap, err := types.NewURLsMap(svr.cfg.InitialCluster)
re.NoError(err)
tlsConfig, err := svr.cfg.Security.ToTLSConfig()
re.NoError(err)
err = etcdutil.CheckClusterID(etcd.Server.Cluster().ID(), urlsMap, tlsConfig)
re.Error(err)
etcd.Close()
testutil.CleanServer(cfgA.DataDir)
}

func (suite *leaderServerTestSuite) TestRegisterServerHandler() {
re := suite.Require()
cfg := NewTestSingleConfig(assertutil.CheckerWithNilAssert(re))
Expand Down Expand Up @@ -330,3 +284,49 @@ func TestIsPathInDirectory(t *testing.T) {
path = filepath.Join(directory, fileName)
re.False(isPathInDirectory(path, directory))
}

// TestCheckClusterID verifies that a PD member cannot rejoin with stale data
// belonging to a different cluster: after cluster B has started, restarting
// member A with its original initial-cluster configuration must make
// etcdutil.CheckClusterID report a cluster-ID mismatch.
func TestCheckClusterID(t *testing.T) {
	re := require.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cfgs := NewTestMultiConfig(assertutil.CheckerWithNilAssert(re), 2)
	for i, cfg := range cfgs {
		cfg.DataDir = fmt.Sprintf("/tmp/test_pd_check_clusterID_%d", i)
		// Clean up before testing.
		testutil.CleanServer(cfg.DataDir)
	}
	originInitial := cfgs[0].InitialCluster
	// Make each member bootstrap as its own standalone cluster first.
	for _, cfg := range cfgs {
		cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, cfg.PeerUrls)
	}

	cfgA, cfgB := cfgs[0], cfgs[1]
	// Start a standalone cluster.
	svrsA, cleanA := newTestServersWithCfgs(ctx, []*config.Config{cfgA}, re)
	defer cleanA()
	// Close it.
	for _, svr := range svrsA {
		svr.Close()
	}

	// Start another cluster.
	_, cleanB := newTestServersWithCfgs(ctx, []*config.Config{cfgB}, re)
	defer cleanB()

	// Start previous cluster, expect an error.
	cfgA.InitialCluster = originInitial
	mockHandler := CreateMockHandler(re, "127.0.0.1")
	svr, err := CreateServer(ctx, cfgA, nil, mockHandler)
	re.NoError(err)

	etcd, err := embed.StartEtcd(svr.etcdCfg)
	re.NoError(err)
	// Defer the cleanup so the embedded etcd and its data dir are released
	// even when one of the assertions below aborts the test via FailNow;
	// the closure keeps the original close-then-clean ordering.
	defer func() {
		etcd.Close()
		testutil.CleanServer(cfgA.DataDir)
	}()
	urlsMap, err := types.NewURLsMap(svr.cfg.InitialCluster)
	re.NoError(err)
	tlsConfig, err := svr.cfg.Security.ToTLSConfig()
	re.NoError(err)
	// Member A's on-disk cluster ID conflicts with the running cluster B,
	// so the cluster-ID check must fail.
	err = etcdutil.CheckClusterID(etcd.Server.Cluster().ID(), urlsMap, tlsConfig)
	re.Error(err)
}

0 comments on commit 6ea61b4

Please sign in to comment.