diff --git a/go.mod b/go.mod index 18e20d40b6..e1286b4325 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,6 @@ require ( github.com/hpcloud/tail v1.0.0 // indirect github.com/imdario/mergo v0.3.7 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 github.com/jonboulle/clockwork v0.1.0 // indirect github.com/json-iterator/go v1.1.5 // indirect github.com/juju/errors v0.0.0-20180806074554-22422dad46e1 diff --git a/go.sum b/go.sum index b590cbe18f..1ae5869159 100644 --- a/go.sum +++ b/go.sum @@ -122,8 +122,6 @@ github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 h1:sHsPfNMAG70QAvKbddQ0uScZCHQoZsT5NykGRCeeeIs= -github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go index 58efed00a1..f0db2a828c 100644 --- a/tests/cmd/e2e/main.go +++ b/tests/cmd/e2e/main.go @@ -14,38 +14,166 @@ package main import ( + "context" "fmt" + "net/http" _ "net/http/pprof" - "os" - "time" - - "github.com/pingcap/tidb-operator/tests/pkg/apimachinery" - - v1 "k8s.io/api/core/v1" + "sync" "github.com/golang/glog" - "github.com/jinzhu/copier" "github.com/pingcap/tidb-operator/tests" + "github.com/pingcap/tidb-operator/tests/pkg/apimachinery" "github.com/pingcap/tidb-operator/tests/pkg/blockwriter" "github.com/pingcap/tidb-operator/tests/pkg/client" + v1 "k8s.io/api/core/v1" "k8s.io/apiserver/pkg/util/logs" ) +var cfg *tests.Config +var certCtx *apimachinery.CertContext +var upgradeVersions []string + func main() { logs.InitLogs() defer logs.FlushLogs() + go func() { + glog.Info(http.ListenAndServe(":6060", nil)) + }() - conf := tests.ParseConfigOrDie() - conf.ManifestDir = "/manifests" + cfg = tests.ParseConfigOrDie() + cfg.ManifestDir = "/manifests" + upgradeVersions = cfg.GetUpgradeTidbVersionsOrDie() + ns := "e2e" + + var err error + certCtx, err = apimachinery.SetupServerCert(ns, tests.WebhookServiceName) + if err != nil { + panic(err) + } + go tests.StartValidatingAdmissionWebhookServerOrDie(certCtx) cli, kubeCli := client.NewCliOrDie() - oa := tests.NewOperatorActions(cli, kubeCli, 5*time.Second, conf, nil) + ocfg := newOperatorConfig() + + cluster1 := newTidbClusterConfig(ns, "cluster1", "") + cluster2 := newTidbClusterConfig(ns, "cluster2", "admin") + cluster2.Resources["pd.replicas"] = "1" + cluster3 := newTidbClusterConfig(ns, "cluster3", "admin") + cluster4 := newTidbClusterConfig(ns, "cluster4", "admin") + + allClusters := []*tests.TidbClusterConfig{ + cluster1, + cluster2, + } + + oa := tests.NewOperatorActions(cli, kubeCli, tests.DefaultPollInterval, cfg, nil) + oa.LabelNodesOrDie() + oa.CleanOperatorOrDie(ocfg) + oa.DeployOperatorOrDie(ocfg) + + fn1 := func(wg *sync.WaitGroup) { + defer wg.Done() + oa.CleanTidbClusterOrDie(cluster1) + oa.DeployTidbClusterOrDie(cluster1) + 
oa.CheckTidbClusterStatusOrDie(cluster1) + oa.CheckDisasterToleranceOrDie(cluster1) + + // scale + cluster1.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5) + oa.ScaleTidbClusterOrDie(cluster1) + oa.CheckTidbClusterStatusOrDie(cluster1) + oa.CheckDisasterToleranceOrDie(cluster1) + + cluster1.ScaleTiDB(2).ScaleTiKV(4).ScalePD(3) + oa.ScaleTidbClusterOrDie(cluster1) + oa.CheckTidbClusterStatusOrDie(cluster1) + oa.CheckDisasterToleranceOrDie(cluster1) + + // configuration change + cluster1.EnableConfigMapRollout = true + // moved to stability test because these two cases need too many times + // bad conf + //cluster1.TiDBPreStartScript = strconv.Quote("exit 1") + //cluster1.TiKVPreStartScript = strconv.Quote("exit 1") + //cluster1.PDPreStartScript = strconv.Quote("exit 1") + //oa.UpgradeTidbClusterOrDie(cluster1) + //time.Sleep(30 * time.Second) + //oa.CheckTidbClustersAvailableOrDie([]*tests.TidbClusterConfig{cluster1}) + // rollback conf + //cluster1.PDPreStartScript = strconv.Quote("") + //cluster1.TiKVPreStartScript = strconv.Quote("") + //cluster1.TiDBPreStartScript = strconv.Quote("") + //oa.UpgradeTidbClusterOrDie(cluster1) + //oa.CheckTidbClusterStatusOrDie(cluster1) + // correct conf + cluster1.UpdatePdMaxReplicas(cfg.PDMaxReplicas). + UpdateTiKVGrpcConcurrency(cfg.TiKVGrpcConcurrency). + UpdateTiDBTokenLimit(cfg.TiDBTokenLimit) + oa.UpgradeTidbClusterOrDie(cluster1) + oa.CheckTidbClusterStatusOrDie(cluster1) + } + fn2 := func(wg *sync.WaitGroup) { + defer wg.Done() + + // deploy + oa.CleanTidbClusterOrDie(cluster2) + oa.DeployTidbClusterOrDie(cluster2) + oa.CheckTidbClusterStatusOrDie(cluster2) + oa.CheckDisasterToleranceOrDie(cluster2) + + cluster2.ScalePD(3) + oa.ScaleTidbClusterOrDie(cluster2) + oa.CheckTidbClusterStatusOrDie(cluster2) + + // upgrade + oa.RegisterWebHookAndServiceOrDie(certCtx, ocfg) + ctx, cancel := context.WithCancel(context.Background()) + assignedNodes := oa.GetTidbMemberAssignedNodesOrDie(cluster2) + cluster2.UpgradeAll(upgradeVersions[0]) + oa.UpgradeTidbClusterOrDie(cluster2) + oa.CheckUpgradeOrDie(ctx, cluster2) + oa.CheckManualPauseTiDBOrDie(cluster2) + oa.CheckTidbClusterStatusOrDie(cluster2) + oa.CheckTidbMemberAssignedNodesOrDie(cluster2, assignedNodes) + cancel() + + oa.CleanWebHookAndServiceOrDie(ocfg) + } + fn3 := func(wg *sync.WaitGroup) { + defer wg.Done() + oa.CleanTidbClusterOrDie(cluster3) + oa.CleanTidbClusterOrDie(cluster4) + oa.DeployTidbClusterOrDie(cluster3) + oa.DeployTidbClusterOrDie(cluster4) + oa.CheckTidbClusterStatusOrDie(cluster3) + oa.CheckTidbClusterStatusOrDie(cluster4) + go oa.BeginInsertDataToOrDie(cluster3) + + // backup and restore + oa.BackupRestoreOrDie(cluster3, cluster4) + } + + var wg sync.WaitGroup + wg.Add(3) + go fn1(&wg) + go fn2(&wg) + go fn3(&wg) + wg.Wait() + + // check data regions disaster tolerance + for _, clusterInfo := range allClusters { + oa.CheckDataRegionDisasterToleranceOrDie(clusterInfo) + } - operatorInfo := &tests.OperatorConfig{ + glog.Infof("\nFinished.") +} + +func newOperatorConfig() *tests.OperatorConfig { + return &tests.OperatorConfig{ Namespace: "pingcap", ReleaseName: "operator", - Image: conf.OperatorImage, - Tag: conf.OperatorTag, + Image: cfg.OperatorImage, + Tag: cfg.OperatorTag, SchedulerImage: "mirantis/hypokube", SchedulerTag: "final", SchedulerFeatures: []string{ @@ -58,296 +186,48 @@ func main() { ImagePullPolicy: v1.PullIfNotPresent, TestMode: true, } +} - ns := os.Getenv("NAMESPACE") - context, err := apimachinery.SetupServerCert(ns, tests.WebhookServiceName) - if err != nil { - panic(err) - 
} - go tests.StartValidatingAdmissionWebhookServerOrDie(context) - - initTidbVersion, err := conf.GetTiDBVersion() - if err != nil { - glog.Fatal(err) - } - - name1 := "e2e-cluster1" - name2 := "e2e-cluster2" - name3 := "e2e-pd-replicas-1" +func newTidbClusterConfig(ns, clusterName, password string) *tests.TidbClusterConfig { + tidbVersion := cfg.GetTiDBVersionOrDie() topologyKey := "rack" - - clusterInfos := []*tests.TidbClusterConfig{ - { - Namespace: name1, - ClusterName: name1, - OperatorTag: conf.OperatorTag, - PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion), - TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion), - TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion), - StorageClassName: "local-storage", - Password: "", - UserName: "root", - InitSecretName: fmt.Sprintf("%s-set-secret", name1), - BackupSecretName: fmt.Sprintf("%s-backup-secret", name1), - BackupName: "backup", - Resources: map[string]string{ - "pd.resources.limits.cpu": "1000m", - "pd.resources.limits.memory": "2Gi", - "pd.resources.requests.cpu": "200m", - "pd.resources.requests.memory": "1Gi", - "tikv.resources.limits.cpu": "2000m", - "tikv.resources.limits.memory": "4Gi", - "tikv.resources.requests.cpu": "200m", - "tikv.resources.requests.memory": "1Gi", - "tidb.resources.limits.cpu": "2000m", - "tidb.resources.limits.memory": "4Gi", - "tidb.resources.requests.cpu": "200m", - "tidb.resources.requests.memory": "1Gi", - "discovery.image": conf.OperatorImage, - }, - Args: map[string]string{}, - Monitor: true, - BlockWriteConfig: blockwriter.Config{ - TableNum: 1, - Concurrency: 1, - BatchSize: 1, - RawSize: 1, - }, - TopologyKey: topologyKey, - EnableConfigMapRollout: true, - }, - { - Namespace: name2, - ClusterName: name2, - OperatorTag: conf.OperatorTag, - PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion), - TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion), - TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion), - StorageClassName: "local-storage", - Password: "admin", - UserName: "root", - InitSecretName: fmt.Sprintf("%s-set-secret", name2), - BackupSecretName: fmt.Sprintf("%s-backup-secret", name2), - BackupName: "backup", - Resources: map[string]string{ - "pd.resources.limits.cpu": "1000m", - "pd.resources.limits.memory": "2Gi", - "pd.resources.requests.cpu": "200m", - "pd.resources.requests.memory": "1Gi", - "tikv.resources.limits.cpu": "2000m", - "tikv.resources.limits.memory": "4Gi", - "tikv.resources.requests.cpu": "200m", - "tikv.resources.requests.memory": "1Gi", - "tidb.resources.limits.cpu": "2000m", - "tidb.resources.limits.memory": "4Gi", - "tidb.resources.requests.cpu": "200m", - "tidb.resources.requests.memory": "1Gi", - "discovery.image": conf.OperatorImage, - }, - Args: map[string]string{}, - Monitor: true, - BlockWriteConfig: blockwriter.Config{ - TableNum: 1, - Concurrency: 1, - BatchSize: 1, - RawSize: 1, - }, - TopologyKey: topologyKey, - EnableConfigMapRollout: false, + return &tests.TidbClusterConfig{ + Namespace: ns, + ClusterName: clusterName, + OperatorTag: cfg.OperatorTag, + PDImage: fmt.Sprintf("pingcap/pd:%s", tidbVersion), + TiKVImage: fmt.Sprintf("pingcap/tikv:%s", tidbVersion), + TiDBImage: fmt.Sprintf("pingcap/tidb:%s", tidbVersion), + StorageClassName: "local-storage", + Password: password, + UserName: "root", + InitSecretName: fmt.Sprintf("%s-set-secret", clusterName), + BackupSecretName: fmt.Sprintf("%s-backup-secret", clusterName), + BackupName: "backup", + Resources: map[string]string{ + "pd.resources.limits.cpu": "1000m", + 
"pd.resources.limits.memory": "2Gi", + "pd.resources.requests.cpu": "200m", + "pd.resources.requests.memory": "200Mi", + "tikv.resources.limits.cpu": "2000m", + "tikv.resources.limits.memory": "4Gi", + "tikv.resources.requests.cpu": "200m", + "tikv.resources.requests.memory": "200Mi", + "tidb.resources.limits.cpu": "2000m", + "tidb.resources.limits.memory": "4Gi", + "tidb.resources.requests.cpu": "200m", + "tidb.resources.requests.memory": "200Mi", + "discovery.image": cfg.OperatorImage, }, - { - Namespace: name2, - ClusterName: name3, - OperatorTag: conf.OperatorTag, - PDImage: fmt.Sprintf("pingcap/pd:%s", initTidbVersion), - TiKVImage: fmt.Sprintf("pingcap/tikv:%s", initTidbVersion), - TiDBImage: fmt.Sprintf("pingcap/tidb:%s", initTidbVersion), - StorageClassName: "local-storage", - Password: "admin", - UserName: "root", - InitSecretName: fmt.Sprintf("%s-set-secret", name2), - BackupSecretName: fmt.Sprintf("%s-backup-secret", name2), - Resources: map[string]string{ - "pd.replicas": "1", - "discovery.image": conf.OperatorImage, - }, - - TopologyKey: topologyKey, + Args: map[string]string{}, + Monitor: true, + BlockWriteConfig: blockwriter.Config{ + TableNum: 1, + Concurrency: 1, + BatchSize: 1, + RawSize: 1, }, + TopologyKey: topologyKey, + EnableConfigMapRollout: true, } - - defer func() { - oa.DumpAllLogs(operatorInfo, clusterInfos) - }() - - oa.LabelNodesOrDie() - - // deploy operator - if err := oa.CleanOperator(operatorInfo); err != nil { - oa.DumpAllLogs(operatorInfo, nil) - glog.Fatal(err) - } - if err = oa.DeployOperator(operatorInfo); err != nil { - oa.DumpAllLogs(operatorInfo, nil) - glog.Fatal(err) - } - - // deploy tidbclusters - for _, clusterInfo := range clusterInfos { - if err = oa.CleanTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - if err = oa.DeployTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - for _, clusterInfo := range clusterInfos { - if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - // check disaster tolerance - for _, clusterInfo := range clusterInfos { - oa.CheckDisasterToleranceOrDie(clusterInfo) - } - - for _, clusterInfo := range clusterInfos { - go oa.BeginInsertDataToOrDie(clusterInfo) - } - - // before upgrade cluster, register webhook first - oa.RegisterWebHookAndServiceOrDie(context, operatorInfo) - - // upgrade test - upgradeTidbVersions := conf.GetUpgradeTidbVersions() - for _, upgradeTidbVersion := range upgradeTidbVersions { - oldTidbMembersAssignedNodes := map[string]map[string]string{} - for _, clusterInfo := range clusterInfos { - assignedNodes, err := oa.GetTidbMemberAssignedNodes(clusterInfo) - if err != nil { - glog.Fatal(err) - } - oldTidbMembersAssignedNodes[clusterInfo.ClusterName] = assignedNodes - clusterInfo = clusterInfo.UpgradeAll(upgradeTidbVersion) - if err = oa.UpgradeTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - // only check manual pause for 1 cluster - if len(clusterInfos) >= 1 { - oa.CheckManualPauseTiDBOrDie(clusterInfos[0]) - } - - for _, clusterInfo := range clusterInfos { - if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - if err = oa.CheckTidbMemberAssignedNodes(clusterInfo, oldTidbMembersAssignedNodes[clusterInfo.ClusterName]); err != nil { - glog.Fatal(err) - } - } - } - - // update configuration on the fly - for _, clusterInfo := range clusterInfos { - clusterInfo = clusterInfo. - UpdatePdMaxReplicas(conf.PDMaxReplicas). - UpdatePDLogLevel("debug"). 
- UpdateTiKVGrpcConcurrency(conf.TiKVGrpcConcurrency). - UpdateTiDBTokenLimit(conf.TiDBTokenLimit) - if err = oa.UpgradeTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - for _, clusterInfo := range clusterInfos { - if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - } - - // after upgrade cluster, clean webhook - oa.CleanWebHookAndService(operatorInfo) - - for _, clusterInfo := range clusterInfos { - clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5) - if err := oa.ScaleTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - for _, clusterInfo := range clusterInfos { - if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - for _, clusterInfo := range clusterInfos { - clusterInfo = clusterInfo.ScalePD(3) - if err := oa.ScaleTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - for _, clusterInfo := range clusterInfos { - if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - for _, clusterInfo := range clusterInfos { - clusterInfo = clusterInfo.ScaleTiKV(3) - if err := oa.ScaleTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - for _, clusterInfo := range clusterInfos { - if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - for _, clusterInfo := range clusterInfos { - clusterInfo = clusterInfo.ScaleTiDB(1) - if err := oa.ScaleTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - } - for _, clusterInfo := range clusterInfos { - if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } - } - - // check data regions disaster tolerance - for _, clusterInfo := range clusterInfos { - oa.CheckDataRegionDisasterToleranceOrDie(clusterInfo) - } - - // backup and restore - backupClusterInfo := clusterInfos[0] - restoreClusterInfo := &tests.TidbClusterConfig{} - copier.Copy(restoreClusterInfo, backupClusterInfo) - restoreClusterInfo.ClusterName = restoreClusterInfo.ClusterName + "-other" - restoreClusterInfo.InitSecretName = fmt.Sprintf("%s-set-secret", restoreClusterInfo.ClusterName) - restoreClusterInfo.BackupSecretName = fmt.Sprintf("%s-backup-secret", restoreClusterInfo.ClusterName) - - if err = oa.CleanTidbCluster(restoreClusterInfo); err != nil { - glog.Fatal(err) - } - if err = oa.DeployTidbCluster(restoreClusterInfo); err != nil { - glog.Fatal(err) - } - if err = oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil { - glog.Fatal(err) - } - - oa.BackupRestoreOrDie(backupClusterInfo, restoreClusterInfo) - - //clean temp dirs when e2e success - err = conf.CleanTempDirs() - if err != nil { - glog.Errorf("failed to clean temp dirs, this error can be ignored.") - } - glog.Infof("\nFinished.") } diff --git a/tests/cmd/stability/main.go b/tests/cmd/stability/main.go index 90618aa670..7cca68beee 100644 --- a/tests/cmd/stability/main.go +++ b/tests/cmd/stability/main.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb-operator/tests/pkg/client" "github.com/pingcap/tidb-operator/tests/slack" "github.com/robfig/cron" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/util/logs" ) @@ -316,3 +317,66 @@ func run() { slack.SuccessCount++ glog.Infof("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339)) } + +func newOperatorConfig() *tests.OperatorConfig { + return &tests.OperatorConfig{ + Namespace: "pingcap", + ReleaseName: "operator", + Image: cfg.OperatorImage, + Tag: 
cfg.OperatorTag, + SchedulerImage: "gcr.io/google-containers/hyperkube", + SchedulerFeatures: []string{ + "StableScheduling=true", + }, + LogLevel: "2", + WebhookServiceName: tests.WebhookServiceName, + WebhookSecretName: "webhook-secret", + WebhookConfigName: "webhook-config", + ImagePullPolicy: v1.PullAlways, + TestMode: true, + } +} + +func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig { + tidbVersion := cfg.GetTiDBVersionOrDie() + topologyKey := "rack" + return &tests.TidbClusterConfig{ + Namespace: ns, + ClusterName: clusterName, + OperatorTag: cfg.OperatorTag, + PDImage: fmt.Sprintf("pingcap/pd:%s", tidbVersion), + TiKVImage: fmt.Sprintf("pingcap/tikv:%s", tidbVersion), + TiDBImage: fmt.Sprintf("pingcap/tidb:%s", tidbVersion), + StorageClassName: "local-storage", + UserName: "root", + Password: "admin", + InitSecretName: fmt.Sprintf("%s-set-secret", clusterName), + BackupSecretName: fmt.Sprintf("%s-backup-secret", clusterName), + BackupName: "backup", + Resources: map[string]string{ + "pd.resources.limits.cpu": "1000m", + "pd.resources.limits.memory": "2Gi", + "pd.resources.requests.cpu": "200m", + "pd.resources.requests.memory": "1Gi", + "tikv.resources.limits.cpu": "8000m", + "tikv.resources.limits.memory": "16Gi", + "tikv.resources.requests.cpu": "1000m", + "tikv.resources.requests.memory": "2Gi", + "tidb.resources.limits.cpu": "8000m", + "tidb.resources.limits.memory": "8Gi", + "tidb.resources.requests.cpu": "500m", + "tidb.resources.requests.memory": "1Gi", + "monitor.persistent": "true", + "discovery.image": cfg.OperatorImage, + "tikv.defaultcfBlockCacheSize": "8GB", + "tikv.writecfBlockCacheSize": "2GB", + }, + Args: map[string]string{ + "binlog.drainer.workerCount": "1024", + "binlog.drainer.txnBatch": "512", + }, + Monitor: true, + BlockWriteConfig: cfg.BlockWriter, + TopologyKey: topologyKey, + } +} diff --git a/tests/cmd/stability/stability.go b/tests/cmd/stability/stability.go deleted file mode 100644 index 8aca7b2204..0000000000 --- a/tests/cmd/stability/stability.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/pingcap/tidb-operator/tests" - v1 "k8s.io/api/core/v1" -) - -func newOperatorConfig() *tests.OperatorConfig { - return &tests.OperatorConfig{ - Namespace: "pingcap", - ReleaseName: "operator", - Image: cfg.OperatorImage, - Tag: cfg.OperatorTag, - SchedulerImage: "gcr.io/google-containers/hyperkube", - SchedulerFeatures: []string{ - "StableScheduling=true", - }, - LogLevel: "2", - WebhookServiceName: tests.WebhookServiceName, - WebhookSecretName: "webhook-secret", - WebhookConfigName: "webhook-config", - ImagePullPolicy: v1.PullAlways, - TestMode: true, - } -} - -func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig { - tidbVersion := cfg.GetTiDBVersionOrDie() - topologyKey := "rack" - return &tests.TidbClusterConfig{ - Namespace: ns, - ClusterName: clusterName, - OperatorTag: cfg.OperatorTag, - PDImage: fmt.Sprintf("pingcap/pd:%s", tidbVersion), - TiKVImage: fmt.Sprintf("pingcap/tikv:%s", tidbVersion), - TiDBImage: fmt.Sprintf("pingcap/tidb:%s", tidbVersion), - StorageClassName: "local-storage", - UserName: "root", - Password: "admin", - InitSecretName: fmt.Sprintf("%s-set-secret", clusterName), - BackupSecretName: fmt.Sprintf("%s-backup-secret", clusterName), - BackupName: "backup", - Resources: map[string]string{ - "pd.resources.limits.cpu": "1000m", - "pd.resources.limits.memory": "2Gi", - "pd.resources.requests.cpu": "200m", - "pd.resources.requests.memory": "1Gi", - 
"tikv.resources.limits.cpu": "8000m", - "tikv.resources.limits.memory": "16Gi", - "tikv.resources.requests.cpu": "1000m", - "tikv.resources.requests.memory": "2Gi", - "tidb.resources.limits.cpu": "8000m", - "tidb.resources.limits.memory": "8Gi", - "tidb.resources.requests.cpu": "500m", - "tidb.resources.requests.memory": "1Gi", - "monitor.persistent": "true", - "discovery.image": cfg.OperatorImage, - "tikv.defaultcfBlockCacheSize": "8GB", - "tikv.writecfBlockCacheSize": "2GB", - }, - Args: map[string]string{ - "binlog.drainer.workerCount": "1024", - "binlog.drainer.txnBatch": "512", - }, - Monitor: true, - BlockWriteConfig: cfg.BlockWriter, - TopologyKey: topologyKey, - } -} diff --git a/tests/failover.go b/tests/failover.go index 0342a8fea5..6dcbb89d09 100644 --- a/tests/failover.go +++ b/tests/failover.go @@ -746,7 +746,7 @@ func (oa *operatorActions) CheckOperatorAvailable(operatorConfig *OperatorConfig } func (oa *operatorActions) CheckTidbClustersAvailable(infos []*TidbClusterConfig) error { - return wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) { + return wait.Poll(3*time.Second, DefaultPollTimeout, func() (bool, error) { for _, info := range infos { succ, err := oa.addDataToCluster(info) if err != nil {