stability: add scale & upgrade case functions #309
The first file in the diff extends the stability test actions:

@@ -23,11 +23,13 @@ import (
_ "github.com/go-sql-driver/mysql" | ||
"github.com/golang/glog" | ||
"github.com/pingcap/errors" | ||
"github.com/pingcap/kvproto/pkg/metapb" | ||
"github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1" | ||
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" | ||
"github.com/pingcap/tidb-operator/pkg/controller" | ||
"github.com/pingcap/tidb-operator/pkg/label" | ||
"k8s.io/api/apps/v1beta1" | ||
batchv1 "k8s.io/api/batch/v1" | ||
corev1 "k8s.io/api/core/v1" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
|
@@ -101,6 +103,8 @@ type operatorActions struct {
    pdControl controller.PDControlInterface
}

+var _ = OperatorActions(&operatorActions{})
+
type OperatorInfo struct {
    Namespace string
    ReleaseName string
@@ -120,7 +124,7 @@ type TidbClusterInfo struct {
    StorageClassName string
    Password string
    RecordCount string
-   InsertBetchSize string
+   InsertBatchSize string
    Resources map[string]string
    Args map[string]string
}
@@ -335,6 +339,7 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterInfo) error {
        return true, nil
    }); err != nil {
+       glog.Infof("check tidb cluster status failed: %s", err.Error())
        return fmt.Errorf("failed to wait for tidbcluster %s/%s ready in 10 minutes", ns, tcName)
    }
@@ -349,10 +354,44 @@ func (oa *operatorActions) StopInsertDataTo(info *TidbClusterInfo) error {
    return nil
}

-func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterInfo) error { return nil }
-func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterInfo) error { return nil }
-func (oa *operatorActions) DeployMonitor(info *TidbClusterInfo) error { return nil }
-func (oa *operatorActions) CleanMonitor(info *TidbClusterInfo) error { return nil }
+func chartPath(name string, tag string) string {
+   return "/charts/" + tag + "/" + name
+}
+
+func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterInfo) error {
+   cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
+       info.ClusterName, chartPath("tidb-cluster", info.OperatorTag), info.HelmSetString())
+   glog.Info("[SCALE] " + cmd)
+   res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+   if err != nil {
+       return errors.Wrapf(err, "failed to scale tidb cluster: %s", string(res))
+   }
+   return nil
+}
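Both ScaleTidbCluster and UpgradeTidbCluster lean on info.HelmSetString(), which is not shown in this diff. A minimal sketch of the kind of helper it might be, assuming it simply flattens override values (such as Resources and Args) into the comma-separated key=value list that `helm upgrade --set-string` expects; the function name buildSetString and its behavior below are illustrative assumptions, not the PR's actual implementation:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildSetString flattens override values into the comma-separated key=value
// form expected by `helm upgrade --set-string`. Hypothetical sketch only; the
// real HelmSetString is defined elsewhere in the tests package.
func buildSetString(values map[string]string) string {
	keys := make([]string, 0, len(values))
	for k := range values {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic ordering keeps the logged command stable
	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, fmt.Sprintf("%s=%s", k, values[k]))
	}
	return strings.Join(pairs, ",")
}

func main() {
	fmt.Println(buildSetString(map[string]string{
		"tidb.replicas": "3",
		"tidb.image":    "pingcap/tidb:v2.1.4",
	}))
	// Output: tidb.image=pingcap/tidb:v2.1.4,tidb.replicas=3
}
```

Values written by the ScalePD/ScaleTiKV/ScaleTiDB helpers added later in this PR end up in Args, so they would flow through a helper of this shape into the helm command.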
+func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterInfo) error {
+   cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
+       info.ClusterName, chartPath("tidb-cluster", info.OperatorTag), info.HelmSetString())
+   glog.Info("[UPGRADE] " + cmd)
+   res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+   if err != nil {
+       return errors.Wrapf(err, "failed to upgrade tidb cluster: %s", string(res))
+   }
+   return nil
+}
+
+func (oa *operatorActions) DeployMonitor(info *TidbClusterInfo) error { return nil }
+func (oa *operatorActions) CleanMonitor(info *TidbClusterInfo) error { return nil }

Review comment: change to …
Review comment: But how to upgrade only one component (e.g. just upgrade tidb)? (See the usage sketch below.)
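On the single-component question above: with the fluent helpers added in the new file later in this diff, only the image you change differs from what is already deployed, so the resulting helm upgrade should roll only that component (assuming HelmSetString carries the per-component image values). A rough sketch of such a case inside the tests package; this wrapper is illustrative, not part of the PR:

```go
// upgradeTiDBOnly bumps just the tidb image and reuses the PR's upgrade and
// status-check actions. PD and TiKV values are left untouched, so helm should
// only roll the tidb statefulset.
func upgradeTiDBOnly(oa OperatorActions, info *TidbClusterInfo, image string) error {
	info = info.UpgradeTiDB(image)
	if err := oa.UpgradeTidbCluster(info); err != nil {
		return err
	}
	return oa.CheckTidbClusterStatus(info)
}
```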
+func getComponentContainer(set *v1beta1.StatefulSet) (corev1.Container, bool) {
+   name := set.Labels[label.ComponentLabelKey]
+   for _, c := range set.Spec.Template.Spec.Containers {
+       if c.Name == name {
+           return c, true
+       }
+   }
+   return corev1.Container{}, false
+}

func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, error) {
    tcName := tc.GetName()
@@ -391,6 +430,11 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, error) {
            ns, pdSetName, pdSet.Status.ReadyReplicas, pdSet.Status.Replicas)
        return false, nil
    }
+   if c, ok := getComponentContainer(pdSet); !ok || tc.Spec.PD.Image != c.Image {
+       glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s",
+           ns, pdSetName, c.Image, tc.Spec.PD.Image)
+       return false, nil
+   }

    for _, member := range tc.Status.PD.Members {
        if !member.Health {
@@ -451,6 +495,11 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, error) {
            ns, tikvSetName, tikvSet.Status.ReadyReplicas, tikvSet.Status.Replicas)
        return false, nil
    }
+   if c, ok := getComponentContainer(tikvSet); !ok || tc.Spec.TiKV.Image != c.Image {
+       glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s",
+           ns, tikvSetName, c.Image, tc.Spec.TiKV.Image)
+       return false, nil
+   }

    for _, store := range tc.Status.TiKV.Stores {
        if store.State != v1alpha1.TiKVStateUp {
@@ -500,6 +549,11 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, error) {
            ns, tidbSetName, tidbSet.Status.ReadyReplicas, tidbSet.Status.Replicas)
        return false, nil
    }
+   if c, ok := getComponentContainer(tidbSet); !ok || tc.Spec.TiDB.Image != c.Image {
+       glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s",
+           ns, tidbSetName, c.Image, tc.Spec.TiDB.Image)
+       return false, nil
+   }

    _, err = oa.kubeCli.CoreV1().Services(ns).Get(tidbSetName, metav1.GetOptions{})
    if err != nil {
@@ -568,8 +622,9 @@ outerLoop:
    for _, pod := range podList.Items {
        podName := pod.GetName()
        if pod.Labels[label.ClusterIDLabelKey] != clusterID {
-           return false, fmt.Errorf("tidbcluster %s/%s's pod %s's label %s not equals %s ",
+           glog.Infof("tidbcluster %s/%s's pod %s's label %s not equals %s ",
                ns, tcName, podName, label.ClusterIDLabelKey, clusterID)
+           return false, nil
        }

        component := pod.Labels[label.ComponentLabelKey]
The second file in the diff is new; it adds fluent scale and upgrade helpers on TidbClusterInfo:

@@ -0,0 +1,50 @@
package tests

import "strconv"
func (tc *TidbClusterInfo) set(name string, value string) (string, bool) {
    // NOTE: not thread-safe, maybe make info struct immutable
    if tc.Args == nil {
        tc.Args = make(map[string]string)
    }
    origVal, ok := tc.Args[name]
    tc.Args[name] = value
    return origVal, ok
}

func (tc *TidbClusterInfo) ScalePD(replicas uint) *TidbClusterInfo {
    tc.set("pd.replicas", strconv.Itoa(int(replicas)))
    return tc
}

func (tc *TidbClusterInfo) ScaleTiKV(replicas uint) *TidbClusterInfo {
    tc.set("tikv.replicas", strconv.Itoa(int(replicas)))
    return tc
}

func (tc *TidbClusterInfo) ScaleTiDB(replicas uint) *TidbClusterInfo {
    tc.set("tidb.replicas", strconv.Itoa(int(replicas)))
    return tc
}

func (tc *TidbClusterInfo) UpgradePD(image string) *TidbClusterInfo {
    tc.PDImage = image
    return tc
}

func (tc *TidbClusterInfo) UpgradeTiKV(image string) *TidbClusterInfo {
    tc.TiKVImage = image
    return tc
}

func (tc *TidbClusterInfo) UpgradeTiDB(image string) *TidbClusterInfo {
    tc.TiDBImage = image
    return tc
}

func (tc *TidbClusterInfo) UpgradeAll(tag string) *TidbClusterInfo {
    return tc.
        UpgradePD("pingcap/pd:" + tag).
        UpgradeTiKV("pingcap/tikv:" + tag).
        UpgradeTiDB("pingcap/tidb:" + tag)
}
The third file in the diff updates the main program that drives these tests:

@@ -24,6 +24,12 @@ import (
    "k8s.io/client-go/rest"
)
+func perror(err error) {
+   if err != nil {
+       glog.Fatal(err)
+   }
+}

func main() {
    flag.Parse()
    logs.InitLogs()
@@ -52,12 +58,8 @@ func main() {
        SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1",
        LogLevel: "2",
    }
-   if err := oa.CleanOperator(operatorInfo); err != nil {
-       glog.Fatal(err)
-   }
-   if err := oa.DeployOperator(operatorInfo); err != nil {
-       glog.Fatal(err)
-   }
+   perror(oa.CleanOperator(operatorInfo))
+   perror(oa.DeployOperator(operatorInfo))

    clusterInfo := &tests.TidbClusterInfo{
        Namespace: "tidb",
@@ -68,18 +70,35 @@
        TiDBImage: "pingcap/tidb:v2.1.3",
        StorageClassName: "local-storage",
        Password: "admin",
-       Args: map[string]string{},
-   }
-   if err := oa.CleanTidbCluster(clusterInfo); err != nil {
-       glog.Fatal(err)
-   }
-   if err := oa.DeployTidbCluster(clusterInfo); err != nil {
-       glog.Fatal(err)
-   }
-   if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil {
-       glog.Fatal(err)
-   }
+       Resources: map[string]string{
+           "pd.resources.limits.cpu": "1000m",
+           "pd.resources.limits.memory": "2Gi",
+           "pd.resources.requests.cpu": "200m",
+           "pd.resources.requests.memory": "1Gi",
+           "tikv.resources.limits.cpu": "2000m",
+           "tikv.resources.limits.memory": "4Gi",
+           "tikv.resources.requests.cpu": "1000m",
+           "tikv.resources.requests.memory": "2Gi",
+           "tidb.resources.limits.cpu": "2000m",
+           "tidb.resources.limits.memory": "4Gi",
+           "tidb.resources.requests.cpu": "500m",
+           "tidb.resources.requests.memory": "1Gi",
+       },
+       Args: map[string]string{},
+   }
+
+   perror(oa.CleanTidbCluster(clusterInfo))
+   perror(oa.DeployTidbCluster(clusterInfo))
+   perror(oa.CheckTidbClusterStatus(clusterInfo))
+
+   clusterInfo = clusterInfo.ScaleTiDB(3)

Review comment: We should scale in and scale out all the components: pd, tikv and tidb. (See the sketch after main, below.)
Reply: Yes, you can do this in another PR.

+   perror(oa.ScaleTidbCluster(clusterInfo))
+   perror(oa.CheckTidbClusterStatus(clusterInfo))
+
+   clusterInfo = clusterInfo.UpgradeAll("v2.1.4")
+   perror(oa.UpgradeTidbCluster(clusterInfo))
+   perror(oa.CheckTidbClusterStatus(clusterInfo))

    restoreClusterInfo := &tests.TidbClusterInfo{
        Namespace: "tidb",
        ClusterName: "demo2",
@@ -89,16 +108,25 @@
        TiDBImage: "pingcap/tidb:v2.1.3",
        StorageClassName: "local-storage",
        Password: "admin",
-       Args: map[string]string{},
+       Resources: map[string]string{
+           "pd.resources.limits.cpu": "1000m",
+           "pd.resources.limits.memory": "2Gi",
+           "pd.resources.requests.cpu": "200m",
+           "pd.resources.requests.memory": "1Gi",
+           "tikv.resources.limits.cpu": "2000m",
+           "tikv.resources.limits.memory": "4Gi",
+           "tikv.resources.requests.cpu": "1000m",
+           "tikv.resources.requests.memory": "2Gi",
+           "tidb.resources.limits.cpu": "2000m",
+           "tidb.resources.limits.memory": "4Gi",
+           "tidb.resources.requests.cpu": "500m",
+           "tidb.resources.requests.memory": "1Gi",
+       },
+       Args: map[string]string{},
    }

-   if err := oa.CleanTidbCluster(restoreClusterInfo); err != nil {
-       glog.Fatal(err)
-   }
-   if err := oa.DeployTidbCluster(restoreClusterInfo); err != nil {
-       glog.Fatal(err)
-   }
-   if err := oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil {
-       glog.Fatal(err)
-   }
+   perror(oa.CleanTidbCluster(restoreClusterInfo))
+   perror(oa.DeployTidbCluster(restoreClusterInfo))
+   perror(oa.CheckTidbClusterStatus(restoreClusterInfo))
}
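Following up on the review note above about exercising scale-out and scale-in for every component: a sketch of what that could look like inside main, after the existing scale and upgrade steps, chaining the helpers this PR adds (the replica counts are arbitrary examples and this snippet assumes the oa and clusterInfo variables from the program above):

```go
	// Scale out pd, tikv and tidb together, then scale back in.
	clusterInfo = clusterInfo.ScalePD(5).ScaleTiKV(5).ScaleTiDB(4)
	perror(oa.ScaleTidbCluster(clusterInfo))
	perror(oa.CheckTidbClusterStatus(clusterInfo))

	clusterInfo = clusterInfo.ScalePD(3).ScaleTiKV(3).ScaleTiDB(2)
	perror(oa.ScaleTidbCluster(clusterInfo))
	perror(oa.CheckTidbClusterStatus(clusterInfo))
```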
Review comment: change to
ScaleTidbClusterTo(info *TidbClusterInfo, pdReplicas int, tikvReplicas int, tidbReplicas int)
is more reasonable.
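The suggested signature could be a thin wrapper over the pieces this PR already adds; a minimal sketch in the tests package, illustrative only and not part of the PR:

```go
// ScaleTidbClusterTo sets all three replica counts on the info struct and then
// performs a single helm-based scale, matching the reviewer's suggested shape.
func (oa *operatorActions) ScaleTidbClusterTo(info *TidbClusterInfo, pdReplicas, tikvReplicas, tidbReplicas int) error {
	info.ScalePD(uint(pdReplicas)).
		ScaleTiKV(uint(tikvReplicas)).
		ScaleTiDB(uint(tidbReplicas))
	return oa.ScaleTidbCluster(info)
}
```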