From 6e36dcaf90d3aee269d0554e37324dfce628d751 Mon Sep 17 00:00:00 2001
From: xiaojingchen
Date: Wed, 20 Mar 2019 17:02:09 +0800
Subject: [PATCH] multi tidb cluster testing (#334)

---
 go.mod                                      |   1 +
 go.sum                                      |   2 +
 tests/actions.go                            |   6 +-
 tests/cmd/e2e/main.go                       | 281 ++++++++++---------
 tests/cmd/stability/main.go                 | 290 ++++++++++++++++++++
 vendor/github.com/jinzhu/copier/Guardfile   |   3 +
 vendor/github.com/jinzhu/copier/License     |  20 ++
 vendor/github.com/jinzhu/copier/README.md   | 100 +++++++
 vendor/github.com/jinzhu/copier/copier.go   | 185 +++++++++++++
 vendor/github.com/jinzhu/copier/wercker.yml |  23 ++
 vendor/modules.txt                          |   2 +
 11 files changed, 784 insertions(+), 129 deletions(-)
 create mode 100644 tests/cmd/stability/main.go
 create mode 100644 vendor/github.com/jinzhu/copier/Guardfile
 create mode 100644 vendor/github.com/jinzhu/copier/License
 create mode 100644 vendor/github.com/jinzhu/copier/README.md
 create mode 100644 vendor/github.com/jinzhu/copier/copier.go
 create mode 100644 vendor/github.com/jinzhu/copier/wercker.yml

diff --git a/go.mod b/go.mod
index 062ef16299..7b07961781 100644
--- a/go.mod
+++ b/go.mod
@@ -31,6 +31,7 @@ require (
     github.com/grpc-ecosystem/grpc-gateway v1.4.1 // indirect
     github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect
     github.com/hpcloud/tail v1.0.0 // indirect
+    github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
     github.com/jonboulle/clockwork v0.1.0 // indirect
     github.com/json-iterator/go v1.1.5 // indirect
     github.com/juju/errors v0.0.0-20180806074554-22422dad46e1
diff --git a/go.sum b/go.sum
index 8efb69e4d3..ab4fbbc9a6 100644
--- a/go.sum
+++ b/go.sum
@@ -59,6 +59,8 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5
 github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 h1:sHsPfNMAG70QAvKbddQ0uScZCHQoZsT5NykGRCeeeIs=
+github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
 github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=
diff --git a/tests/actions.go b/tests/actions.go
index 49f8f642b4..36afc3728b 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -1115,8 +1115,8 @@ func (oa *operatorActions) Restore(from *TidbClusterInfo, to *TidbClusterInfo) e
     setString := to.HelmSetString(sets)
 
     restoreName := fmt.Sprintf("%s-restore", from.ClusterName)
-    cmd := fmt.Sprintf("helm install -n %s --namespace %s /charts/%s/tidb-backup --set-string %s",
-        restoreName, to.Namespace, to.OperatorTag, setString)
+    cmd := fmt.Sprintf("helm upgrade %s /charts/%s/tidb-backup --set-string %s",
+        restoreName, to.OperatorTag, setString)
     glog.Infof("install restore [%s]", cmd)
     res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
     if err != nil {
@@ -1222,7 +1222,7 @@ func (oa *operatorActions) CreateSecret(info *TidbClusterInfo) error {
 
     _, err := oa.kubeCli.CoreV1().Secrets(info.Namespace).Create(&initSecret)
     if err != nil && !releaseIsExist(err) {
-       return err
+        return err
     }
 
     backupSecret := corev1.Secret{
diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go
index 313c89c251..1b27fe40a1 100644
--- a/tests/cmd/e2e/main.go
+++ b/tests/cmd/e2e/main.go
@@ -14,11 +14,12 @@ package main
 
 import (
+    "fmt"
     "net/http"
     _ "net/http/pprof"
-    "time"
 
     "github.com/golang/glog"
+    "github.com/jinzhu/copier"
     "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
     "github.com/pingcap/tidb-operator/tests"
     "github.com/pingcap/tidb-operator/tests/backup"
@@ -29,12 +30,6 @@ import (
     "k8s.io/client-go/rest"
 )
 
-func perror(err error) {
-    if err != nil {
-        glog.Fatal(err)
-    }
-}
-
 func main() {
     logs.InitLogs()
     defer logs.FlushLogs()
@@ -49,6 +44,12 @@ func main() {
         glog.Info(http.ListenAndServe("localhost:6060", nil))
     }()
 
+    // TODO read these args from config
+    beginTidbVersion := "v2.1.0"
+    toTidbVersion := "v2.1.4"
+    operatorTag := "master"
+    operatorImage := "pingcap/tidb-operator:latest"
+
     cfg, err := rest.InClusterConfig()
     if err != nil {
         glog.Fatalf("failed to get config: %v", err)
@@ -67,11 +68,85 @@ func main() {
     operatorInfo := &tests.OperatorInfo{
         Namespace:      "pingcap",
         ReleaseName:    "operator",
-        Image:          "pingcap/tidb-operator:latest",
-        Tag:            "master",
+        Image:          operatorImage,
+        Tag:            operatorTag,
         SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1",
         LogLevel:       "2",
     }
+
+    // create database and table and insert a column for test backup and restore
+    initSql := `"create database record;use record;create table test(t char(32))"`
+
+    clusterInfos := []*tests.TidbClusterInfo{
+        {
+            Namespace:        "e2e-cluster1",
+            ClusterName:      "e2e-cluster1",
+            OperatorTag:      operatorTag,
+            PDImage:          fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
+            TiKVImage:        fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
+            TiDBImage:        fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
+            StorageClassName: "local-storage",
+            Password:         "admin",
+            InitSql:          initSql,
+            UserName:         "root",
+            InitSecretName:   "demo-set-secret",
+            BackupSecretName: "demo-backup-secret",
+            BackupPVC:        "test-backup",
+            Resources: map[string]string{
+                "pd.resources.limits.cpu":        "1000m",
+                "pd.resources.limits.memory":     "2Gi",
+                "pd.resources.requests.cpu":      "200m",
+                "pd.resources.requests.memory":   "1Gi",
+                "tikv.resources.limits.cpu":      "2000m",
+                "tikv.resources.limits.memory":   "4Gi",
+                "tikv.resources.requests.cpu":    "1000m",
+                "tikv.resources.requests.memory": "2Gi",
+                "tidb.resources.limits.cpu":      "2000m",
+                "tidb.resources.limits.memory":   "4Gi",
+                "tidb.resources.requests.cpu":    "500m",
+                "tidb.resources.requests.memory": "1Gi",
+            },
+            Args:    map[string]string{},
+            Monitor: true,
+        },
+        {
+            Namespace:        "e2e-cluster2",
+            ClusterName:      "e2e-cluster2",
+            OperatorTag:      "master",
+            PDImage:          fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
+            TiKVImage:        fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
+            TiDBImage:        fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
+            StorageClassName: "local-storage",
+            Password:         "admin",
+            InitSql:          initSql,
+            UserName:         "root",
+            InitSecretName:   "demo-set-secret",
+            BackupSecretName: "demo-backup-secret",
+            BackupPVC:        "test-backup",
+            Resources: map[string]string{
+                "pd.resources.limits.cpu":        "1000m",
+                "pd.resources.limits.memory":     "2Gi",
+                "pd.resources.requests.cpu":      "200m",
+                "pd.resources.requests.memory":   "1Gi",
+                "tikv.resources.limits.cpu":      "2000m",
+                "tikv.resources.limits.memory":   "4Gi",
+                "tikv.resources.requests.cpu":    "1000m",
+                "tikv.resources.requests.memory": "2Gi",
+                "tidb.resources.limits.cpu":      "2000m",
+                "tidb.resources.limits.memory":   "4Gi",
+                "tidb.resources.requests.cpu":    "500m",
+                "tidb.resources.requests.memory": "1Gi",
+            },
+            Args:    map[string]string{},
+            Monitor: true,
+        },
+    }
+
+    defer func() {
+        oa.DumpAllLogs(operatorInfo, clusterInfos)
+    }()
+
+    // deploy operator
     if err := oa.CleanOperator(operatorInfo); err != nil {
         oa.DumpAllLogs(operatorInfo, nil)
         glog.Fatal(err)
@@ -81,163 +156,117 @@ func main() {
         glog.Fatal(err)
     }
 
-    // create database and table and insert a column for test backup and restore
-    initSql := `"create database record;use record;create table test(t char(32))"`
-
-    clusterInfo := &tests.TidbClusterInfo{
-        BackupPVC:        "test-backup",
-        Namespace:        "tidb",
-        ClusterName:      "demo",
-        OperatorTag:      "master",
-        PDImage:          "pingcap/pd:v2.1.0",
-        TiKVImage:        "pingcap/tikv:v2.1.0",
-        TiDBImage:        "pingcap/tidb:v2.1.0",
-        StorageClassName: "local-storage",
-        Password:         "admin",
-        InitSql:          initSql,
-        UserName:         "root",
-        InitSecretName:   "demo-set-secret",
-        BackupSecretName: "demo-backup-secret",
-        Resources: map[string]string{
-            "pd.resources.limits.cpu":        "1000m",
-            "pd.resources.limits.memory":     "2Gi",
-            "pd.resources.requests.cpu":      "200m",
-            "pd.resources.requests.memory":   "1Gi",
-            "tikv.resources.limits.cpu":      "2000m",
-            "tikv.resources.limits.memory":   "4Gi",
-            "tikv.resources.requests.cpu":    "1000m",
-            "tikv.resources.requests.memory": "2Gi",
-            "tidb.resources.limits.cpu":      "2000m",
-            "tidb.resources.limits.memory":   "4Gi",
-            "tidb.resources.requests.cpu":    "500m",
-            "tidb.resources.requests.memory": "1Gi",
-        },
-        Args: map[string]string{},
+    // deploy tidbclusters
+    for _, clusterInfo := range clusterInfos {
+        if err = oa.CleanTidbCluster(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
+        if err = oa.DeployTidbCluster(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
     }
 
-    if err = oa.CleanTidbCluster(clusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
-        glog.Fatal(err)
-    }
-    if err = oa.DeployTidbCluster(clusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
-        glog.Fatal(err)
+    for _, clusterInfo := range clusterInfos {
+        if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
     }
-    if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
-        glog.Fatal(err)
+
+    var workloads []workload.Workload
+    for _, clusterInfo := range clusterInfos {
+        workload := ddl.New(clusterInfo.DSN("test"), 1, 1)
+        workloads = append(workloads, workload)
     }
 
     err = workload.Run(func() error {
-        clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
-        if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
-            return err
+
+        for _, clusterInfo := range clusterInfos {
+            clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
+            if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
+                return err
+            }
         }
-        if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+                return err
+            }
         }
 
-        clusterInfo = clusterInfo.ScalePD(3)
-        if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            clusterInfo = clusterInfo.ScalePD(3)
+            if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
+                return err
+            }
         }
-        if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+                return err
+            }
         }
 
-        clusterInfo = clusterInfo.ScaleTiKV(3)
-        if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            clusterInfo = clusterInfo.ScaleTiKV(3)
+            if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
+                return err
+            }
         }
-        if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+                return err
+            }
         }
 
-        clusterInfo = clusterInfo.ScaleTiDB(1)
-        if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            clusterInfo = clusterInfo.ScaleTiDB(1)
+            if err := oa.ScaleTidbCluster(clusterInfo); err != nil {
+                return err
+            }
         }
-        if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-            return err
+        for _, clusterInfo := range clusterInfos {
+            if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+                return err
+            }
         }
 
         return nil
-    }, ddl.New(clusterInfo.DSN("test"), 1, 1))
+    }, workloads...)
 
     if err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
         glog.Fatal(err)
     }
 
-    clusterInfo = clusterInfo.UpgradeAll("v2.1.4")
-    if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
-        glog.Fatal(err)
-    }
-    if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo})
-        glog.Fatal(err)
+    for _, clusterInfo := range clusterInfos {
+        clusterInfo = clusterInfo.UpgradeAll(toTidbVersion)
+        if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
     }
 
-    restoreClusterInfo := &tests.TidbClusterInfo{
-        BackupPVC:        "test-backup",
-        Namespace:        "tidb",
-        ClusterName:      "demo2",
-        OperatorTag:      "master",
-        PDImage:          "pingcap/pd:v2.1.0",
-        TiKVImage:        "pingcap/tikv:v2.1.0",
-        TiDBImage:        "pingcap/tidb:v2.1.0",
-        StorageClassName: "local-storage",
-        Password:         "admin",
-        InitSql:          initSql,
-        UserName:         "root",
-        InitSecretName:   "demo2-set-secret",
-        BackupSecretName: "demo2-backup-secret",
-        Resources: map[string]string{
-            "pd.resources.limits.cpu":        "1000m",
-            "pd.resources.limits.memory":     "2Gi",
-            "pd.resources.requests.cpu":      "200m",
-            "pd.resources.requests.memory":   "1Gi",
-            "tikv.resources.limits.cpu":      "2000m",
-            "tikv.resources.limits.memory":   "4Gi",
-            "tikv.resources.requests.cpu":    "1000m",
-            "tikv.resources.requests.memory": "2Gi",
-            "tidb.resources.limits.cpu":      "2000m",
-            "tidb.resources.limits.memory":   "4Gi",
-            "tidb.resources.requests.cpu":    "500m",
-            "tidb.resources.requests.memory": "1Gi",
-        },
-        Args: map[string]string{},
+    for _, clusterInfo := range clusterInfos {
+        if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
     }
 
+    // backup and restore
+    backupClusterInfo := clusterInfos[0]
+    restoreClusterInfo := &tests.TidbClusterInfo{}
+    copier.Copy(restoreClusterInfo, backupClusterInfo)
+    restoreClusterInfo.ClusterName = restoreClusterInfo.ClusterName + "-restore"
+
     if err = oa.CleanTidbCluster(restoreClusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
         glog.Fatal(err)
     }
     if err = oa.DeployTidbCluster(restoreClusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
         glog.Fatal(err)
     }
     if err = oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
         glog.Fatal(err)
     }
 
-    backupCase := backup.NewBackupCase(oa, clusterInfo, restoreClusterInfo)
+    backupCase := backup.NewBackupCase(oa, backupClusterInfo, restoreClusterInfo)
 
     if err := backupCase.Run(); err != nil {
-        oa.DumpAllLogs(operatorInfo, []*tests.TidbClusterInfo{clusterInfo, restoreClusterInfo})
-        glog.Fatal(err)
-    }
-
-    fa := tests.NewFaultTriggerAction(cli, kubeCli, conf)
-    if err := fa.StopETCD("172.16.4.171"); err != nil {
-        glog.Fatal(err)
-    }
-
-    time.Sleep(1 * time.Minute)
-
-    if err := fa.StartETCD("172.16.4.171"); err != nil {
         glog.Fatal(err)
     }
 }
diff --git a/tests/cmd/stability/main.go b/tests/cmd/stability/main.go
new file mode 100644
index 0000000000..001973b168
--- /dev/null
+++ b/tests/cmd/stability/main.go
@@ -0,0 +1,290 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+    "fmt"
+    "net/http"
+    _ "net/http/pprof"
+    "time"
+
+    "github.com/golang/glog"
+    "github.com/jinzhu/copier"
+    "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
+    "github.com/pingcap/tidb-operator/tests"
+    "github.com/pingcap/tidb-operator/tests/backup"
+    "github.com/pingcap/tidb-operator/tests/pkg/workload"
+    "github.com/pingcap/tidb-operator/tests/pkg/workload/ddl"
+    "k8s.io/apiserver/pkg/util/logs"
+    "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/rest"
+)
+
+func main() {
+    logs.InitLogs()
+    defer logs.FlushLogs()
+
+    conf := tests.NewConfig()
+    err := conf.Parse()
+    if err != nil {
+        glog.Fatalf("failed to parse config: %v", err)
+    }
+
+    go func() {
+        glog.Info(http.ListenAndServe("localhost:6060", nil))
+    }()
+
+    // TODO read these args from config
+    beginTidbVersion := "v2.1.0"
+    toTidbVersion := "v2.1.4"
+    operatorTag := "master"
+    operatorImage := "pingcap/tidb-operator:latest"
+
+    cfg, err := rest.InClusterConfig()
+    if err != nil {
+        glog.Fatalf("failed to get config: %v", err)
+    }
+    cli, err := versioned.NewForConfig(cfg)
+    if err != nil {
+        glog.Fatalf("failed to create Clientset: %v", err)
+    }
+    kubeCli, err := kubernetes.NewForConfig(cfg)
+    if err != nil {
+        glog.Fatalf("failed to get kubernetes Clientset: %v", err)
+    }
+
+    oa := tests.NewOperatorActions(cli, kubeCli, conf)
+
+    operatorInfo := &tests.OperatorInfo{
+        Namespace:      "pingcap",
+        ReleaseName:    "operator",
+        Image:          operatorImage,
+        Tag:            operatorTag,
+        SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1",
+        LogLevel:       "2",
+    }
+
+    // create database and table and insert a column for test backup and restore
+    initSql := `"create database record;use record;create table test(t char(32))"`
+
+    clusterInfos := []*tests.TidbClusterInfo{
+        {
+            Namespace:        "e2e-cluster1",
+            ClusterName:      "e2e-cluster1",
+            OperatorTag:      operatorTag,
+            PDImage:          fmt.Sprintf("pingcap/pd:%s", beginTidbVersion),
+            TiKVImage:        fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion),
+            TiDBImage:        fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion),
+            StorageClassName: "local-storage",
+            Password:         "admin",
+            InitSql:          initSql,
+            UserName:         "root",
+            InitSecretName:   "demo-set-secret",
+            BackupSecretName: "demo-backup-secret",
+            BackupPVC:        "test-backup",
+            Resources: map[string]string{
+                "pd.resources.limits.cpu":        "1000m",
"2Gi", + "pd.resources.requests.cpu": "200m", + "pd.resources.requests.memory": "1Gi", + "tikv.resources.limits.cpu": "2000m", + "tikv.resources.limits.memory": "4Gi", + "tikv.resources.requests.cpu": "1000m", + "tikv.resources.requests.memory": "2Gi", + "tidb.resources.limits.cpu": "2000m", + "tidb.resources.limits.memory": "4Gi", + "tidb.resources.requests.cpu": "500m", + "tidb.resources.requests.memory": "1Gi", + }, + Args: map[string]string{}, + Monitor: true, + }, + { + Namespace: "e2e-cluster2", + ClusterName: "e2e-cluster2", + OperatorTag: "master", + PDImage: fmt.Sprintf("pingcap/pd:%s", beginTidbVersion), + TiKVImage: fmt.Sprintf("pingcap/tikv:%s", beginTidbVersion), + TiDBImage: fmt.Sprintf("pingcap/tidb:%s", beginTidbVersion), + StorageClassName: "local-storage", + Password: "admin", + InitSql: initSql, + UserName: "root", + InitSecretName: "demo-set-secret", + BackupSecretName: "demo-backup-secret", + BackupPVC: "test-backup", + Resources: map[string]string{ + "pd.resources.limits.cpu": "1000m", + "pd.resources.limits.memory": "2Gi", + "pd.resources.requests.cpu": "200m", + "pd.resources.requests.memory": "1Gi", + "tikv.resources.limits.cpu": "2000m", + "tikv.resources.limits.memory": "4Gi", + "tikv.resources.requests.cpu": "1000m", + "tikv.resources.requests.memory": "2Gi", + "tidb.resources.limits.cpu": "2000m", + "tidb.resources.limits.memory": "4Gi", + "tidb.resources.requests.cpu": "500m", + "tidb.resources.requests.memory": "1Gi", + }, + Args: map[string]string{}, + Monitor: true, + }, + } + + defer func() { + oa.DumpAllLogs(operatorInfo, clusterInfos) + }() + + // deploy operator + if err := oa.CleanOperator(operatorInfo); err != nil { + oa.DumpAllLogs(operatorInfo, nil) + glog.Fatal(err) + } + if err = oa.DeployOperator(operatorInfo); err != nil { + oa.DumpAllLogs(operatorInfo, nil) + glog.Fatal(err) + } + + // deploy tidbclusters + for _, clusterInfo := range clusterInfos { + if err = oa.CleanTidbCluster(clusterInfo); err != nil { + glog.Fatal(err) + } + if err = oa.DeployTidbCluster(clusterInfo); err != nil { + glog.Fatal(err) + } + } + + for _, clusterInfo := range clusterInfos { + if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil { + glog.Fatal(err) + } + } + + var workloads []workload.Workload + for _, clusterInfo := range clusterInfos { + workload := ddl.New(clusterInfo.DSN("test"), 1, 1) + workloads = append(workloads, workload) + } + + err = workload.Run(func() error { + + for _, clusterInfo := range clusterInfos { + clusterInfo = clusterInfo.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5) + if err := oa.ScaleTidbCluster(clusterInfo); err != nil { + return err + } + } + for _, clusterInfo := range clusterInfos { + if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { + return err + } + } + + for _, clusterInfo := range clusterInfos { + clusterInfo = clusterInfo.ScalePD(3) + if err := oa.ScaleTidbCluster(clusterInfo); err != nil { + return err + } + } + for _, clusterInfo := range clusterInfos { + if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { + return err + } + } + + for _, clusterInfo := range clusterInfos { + clusterInfo = clusterInfo.ScaleTiKV(3) + if err := oa.ScaleTidbCluster(clusterInfo); err != nil { + return err + } + } + for _, clusterInfo := range clusterInfos { + if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { + return err + } + } + + for _, clusterInfo := range clusterInfos { + clusterInfo = clusterInfo.ScaleTiDB(1) + if err := oa.ScaleTidbCluster(clusterInfo); err != nil { + return err + } + } + for _, 
+        for _, clusterInfo := range clusterInfos {
+            if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+                return err
+            }
+        }
+
+        return nil
+    }, workloads...)
+
+    if err != nil {
+        glog.Fatal(err)
+    }
+
+    for _, clusterInfo := range clusterInfos {
+        if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
+    }
+
+    for _, clusterInfo := range clusterInfos {
+        clusterInfo = clusterInfo.UpgradeAll(toTidbVersion)
+        if err = oa.UpgradeTidbCluster(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
+    }
+
+    for _, clusterInfo := range clusterInfos {
+        if err = oa.CheckTidbClusterStatus(clusterInfo); err != nil {
+            glog.Fatal(err)
+        }
+    }
+
+    // backup and restore
+    backupClusterInfo := clusterInfos[0]
+    restoreClusterInfo := &tests.TidbClusterInfo{}
+    copier.Copy(restoreClusterInfo, backupClusterInfo)
+    restoreClusterInfo.ClusterName = restoreClusterInfo.ClusterName + "-restore"
+
+    if err = oa.CleanTidbCluster(restoreClusterInfo); err != nil {
+        glog.Fatal(err)
+    }
+    if err = oa.DeployTidbCluster(restoreClusterInfo); err != nil {
+        glog.Fatal(err)
+    }
+    if err = oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil {
+        glog.Fatal(err)
+    }
+
+    backupCase := backup.NewBackupCase(oa, backupClusterInfo, restoreClusterInfo)
+
+    if err := backupCase.Run(); err != nil {
+        glog.Fatal(err)
+    }
+
+    fa := tests.NewFaultTriggerAction(cli, kubeCli, conf)
+    if err := fa.StopETCD("172.16.4.171"); err != nil {
+        glog.Fatal(err)
+    }
+
+    time.Sleep(1 * time.Minute)
+
+    if err := fa.StartETCD("172.16.4.171"); err != nil {
+        glog.Fatal(err)
+    }
+}
diff --git a/vendor/github.com/jinzhu/copier/Guardfile b/vendor/github.com/jinzhu/copier/Guardfile
new file mode 100644
index 0000000000..0b860b0653
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/Guardfile
@@ -0,0 +1,3 @@
+guard 'gotest' do
+  watch(%r{\.go$})
+end
diff --git a/vendor/github.com/jinzhu/copier/License b/vendor/github.com/jinzhu/copier/License
new file mode 100644
index 0000000000..e2dc5381e1
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/License
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/copier/README.md b/vendor/github.com/jinzhu/copier/README.md
new file mode 100644
index 0000000000..f929b46793
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/README.md
@@ -0,0 +1,100 @@
+# Copier
+
+  I am a copier, I copy everything from one to another
+
+[![wercker status](https://app.wercker.com/status/9d44ad2d4e6253929c8fb71359effc0b/s/master "wercker status")](https://app.wercker.com/project/byKey/9d44ad2d4e6253929c8fb71359effc0b)
+
+## Features
+
+* Copy from field to field with same name
+* Copy from method to field with same name
+* Copy from field to method with same name
+* Copy from slice to slice
+* Copy from struct to slice
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/jinzhu/copier"
+)
+
+type User struct {
+	Name string
+	Role string
+	Age  int32
+}
+
+func (user *User) DoubleAge() int32 {
+	return 2 * user.Age
+}
+
+type Employee struct {
+	Name      string
+	Age       int32
+	DoubleAge int32
+	EmployeId int64
+	SuperRule string
+}
+
+func (employee *Employee) Role(role string) {
+	employee.SuperRule = "Super " + role
+}
+
+func main() {
+	var (
+		user      = User{Name: "Jinzhu", Age: 18, Role: "Admin"}
+		users     = []User{{Name: "Jinzhu", Age: 18, Role: "Admin"}, {Name: "jinzhu 2", Age: 30, Role: "Dev"}}
+		employee  = Employee{}
+		employees = []Employee{}
+	)
+
+	copier.Copy(&employee, &user)
+
+	fmt.Printf("%#v \n", employee)
+	// Employee{
+	//    Name: "Jinzhu",           // Copy from field
+	//    Age: 18,                  // Copy from field
+	//    DoubleAge: 36,            // Copy from method
+	//    EmployeeId: 0,            // Ignored
+	//    SuperRule: "Super Admin", // Copy to method
+	// }
+
+	// Copy struct to slice
+	copier.Copy(&employees, &user)
+
+	fmt.Printf("%#v \n", employees)
+	// []Employee{
+	//    {Name: "Jinzhu", Age: 18, DoubleAge: 36, EmployeId: 0, SuperRule: "Super Admin"}
+	// }
+
+	// Copy slice to slice
+	employees = []Employee{}
+	copier.Copy(&employees, &users)
+
+	fmt.Printf("%#v \n", employees)
+	// []Employee{
+	//    {Name: "Jinzhu", Age: 18, DoubleAge: 36, EmployeId: 0, SuperRule: "Super Admin"},
+	//    {Name: "jinzhu 2", Age: 30, DoubleAge: 60, EmployeId: 0, SuperRule: "Super Dev"},
+	// }
+}
+```
+
+## Contributing
+
+You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+# Author
+
+**jinzhu**
+
+* <http://github.com/jinzhu>
+* <wosmvp@gmail.com>
+* <http://twitter.com/zhangjinzhu>
+
+## License
+
+Released under the [MIT License](https://github.com/jinzhu/copier/blob/master/License).
diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go
new file mode 100644
index 0000000000..ecbddffb0f
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/copier.go
@@ -0,0 +1,185 @@
+package copier
+
+import (
+    "database/sql"
+    "errors"
+    "reflect"
+)
+
+// Copy copy things
+func Copy(toValue interface{}, fromValue interface{}) (err error) {
+    var (
+        isSlice bool
+        amount  = 1
+        from    = indirect(reflect.ValueOf(fromValue))
+        to      = indirect(reflect.ValueOf(toValue))
+    )
+
+    if !to.CanAddr() {
+        return errors.New("copy to value is unaddressable")
+    }
+
+    // Return is from value is invalid
+    if !from.IsValid() {
+        return
+    }
+
+    // Just set it if possible to assign
+    if from.Type().AssignableTo(to.Type()) {
+        to.Set(from)
+        return
+    }
+
+    fromType := indirectType(from.Type())
+    toType := indirectType(to.Type())
+
+    if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct {
+        return
+    }
+
+    if to.Kind() == reflect.Slice {
+        isSlice = true
+        if from.Kind() == reflect.Slice {
+            amount = from.Len()
+        }
+    }
+
+    for i := 0; i < amount; i++ {
+        var dest, source reflect.Value
+
+        if isSlice {
+            // source
+            if from.Kind() == reflect.Slice {
+                source = indirect(from.Index(i))
+            } else {
+                source = indirect(from)
+            }
+
+            // dest
+            dest = indirect(reflect.New(toType).Elem())
+        } else {
+            source = indirect(from)
+            dest = indirect(to)
+        }
+
+        // Copy from field to field or method
+        for _, field := range deepFields(fromType) {
+            name := field.Name
+
+            if fromField := source.FieldByName(name); fromField.IsValid() {
+                // has field
+                if toField := dest.FieldByName(name); toField.IsValid() {
+                    if toField.CanSet() {
+                        if !set(toField, fromField) {
+                            if err := Copy(toField.Addr().Interface(), fromField.Interface()); err != nil {
+                                return err
+                            }
+                        }
+                    }
+                } else {
+                    // try to set to method
+                    var toMethod reflect.Value
+                    if dest.CanAddr() {
+                        toMethod = dest.Addr().MethodByName(name)
+                    } else {
+                        toMethod = dest.MethodByName(name)
+                    }
+
+                    if toMethod.IsValid() && toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0)) {
+                        toMethod.Call([]reflect.Value{fromField})
+                    }
+                }
+            }
+        }
+
+        // Copy from method to field
+        for _, field := range deepFields(toType) {
+            name := field.Name
+
+            var fromMethod reflect.Value
+            if source.CanAddr() {
+                fromMethod = source.Addr().MethodByName(name)
+            } else {
+                fromMethod = source.MethodByName(name)
+            }
+
+            if fromMethod.IsValid() && fromMethod.Type().NumIn() == 0 && fromMethod.Type().NumOut() == 1 {
+                if toField := dest.FieldByName(name); toField.IsValid() && toField.CanSet() {
+                    values := fromMethod.Call([]reflect.Value{})
+                    if len(values) >= 1 {
+                        set(toField, values[0])
+                    }
+                }
+            }
+        }
+
+        if isSlice {
+            if dest.Addr().Type().AssignableTo(to.Type().Elem()) {
+                to.Set(reflect.Append(to, dest.Addr()))
+            } else if dest.Type().AssignableTo(to.Type().Elem()) {
+                to.Set(reflect.Append(to, dest))
+            }
+        }
+    }
+    return
+}
+
+func deepFields(reflectType reflect.Type) []reflect.StructField {
+    var fields []reflect.StructField
+
+    if reflectType = indirectType(reflectType); reflectType.Kind() == reflect.Struct {
+        for i := 0; i < reflectType.NumField(); i++ {
+            v := reflectType.Field(i)
+            if v.Anonymous {
+                fields = append(fields, deepFields(v.Type)...)
+            } else {
+                fields = append(fields, v)
+            }
+        }
+    }
+
+    return fields
+}
+
+func indirect(reflectValue reflect.Value) reflect.Value {
+    for reflectValue.Kind() == reflect.Ptr {
+        reflectValue = reflectValue.Elem()
+    }
+    return reflectValue
+}
+
+func indirectType(reflectType reflect.Type) reflect.Type {
+    for reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {
+        reflectType = reflectType.Elem()
+    }
+    return reflectType
+}
+
+func set(to, from reflect.Value) bool {
+    if from.IsValid() {
+        if to.Kind() == reflect.Ptr {
+            //set `to` to nil if from is nil
+            if from.Kind() == reflect.Ptr && from.IsNil() {
+                to.Set(reflect.Zero(to.Type()))
+                return true
+            } else if to.IsNil() {
+                to.Set(reflect.New(to.Type().Elem()))
+            }
+            to = to.Elem()
+        }
+
+        if from.Type().ConvertibleTo(to.Type()) {
+            to.Set(from.Convert(to.Type()))
+        } else if scanner, ok := to.Addr().Interface().(sql.Scanner); ok {
+            err := scanner.Scan(from.Interface())
+            if err != nil {
+                return false
+            }
+        } else if from.Kind() == reflect.Ptr {
+            return set(to, from.Elem())
+        } else {
+            return false
+        }
+    }
+    return true
+}
diff --git a/vendor/github.com/jinzhu/copier/wercker.yml b/vendor/github.com/jinzhu/copier/wercker.yml
new file mode 100644
index 0000000000..5e6ce981dc
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/wercker.yml
@@ -0,0 +1,23 @@
+box: golang
+
+build:
+  steps:
+    - setup-go-workspace
+
+    # Gets the dependencies
+    - script:
+        name: go get
+        code: |
+          go get
+
+    # Build the project
+    - script:
+        name: go build
+        code: |
+          go build ./...
+
+    # Test the project
+    - script:
+        name: go test
+        code: |
+          go test ./...
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e963735bad..dccfd7a16a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -141,6 +141,8 @@ github.com/hpcloud/tail/ratelimiter
 github.com/hpcloud/tail/util
 github.com/hpcloud/tail/watch
 github.com/hpcloud/tail/winfile
+# github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
+github.com/jinzhu/copier
 # github.com/jonboulle/clockwork v0.1.0
 github.com/jonboulle/clockwork
 # github.com/json-iterator/go v1.1.5