From 152c65c51385172bdd5a3f7aac9935f0cb077de9 Mon Sep 17 00:00:00 2001 From: xiaojingchen Date: Thu, 18 Jul 2019 17:37:13 +0800 Subject: [PATCH] Remove storeLabels (#663) * remove storeLabels (cherry picked from commit 825705b0f2de73fac1ac7bdc418eff7a524be3b2) --- .../tidb-cluster/templates/tidb-cluster.yaml | 10 ------- charts/tidb-cluster/values.yaml | 11 -------- pkg/apis/pingcap.com/v1alpha1/types.go | 1 - .../v1alpha1/zz_generated.deepcopy.go | 5 ---- pkg/manager/member/tikv_member_manager.go | 12 ++++++++- .../member/tikv_member_manager_test.go | 26 ++++++++++++++++--- tests/cluster_info.go | 4 +-- tests/util.go | 15 +++-------- 8 files changed, 40 insertions(+), 44 deletions(-) diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml index 7e6484a368..a2d94ea355 100644 --- a/charts/tidb-cluster/templates/tidb-cluster.yaml +++ b/charts/tidb-cluster/templates/tidb-cluster.yaml @@ -67,16 +67,6 @@ spec: annotations: {{ toYaml .Values.tikv.annotations | indent 6 }} {{- end }} - {{- if .Values.tikv.storeLabels }} - storeLabels: -{{ toYaml .Values.tikv.storeLabels | indent 4 }} - {{- else }} - storeLabels: - - region - - zone - - rack - - host - {{- end }} tidb: replicas: {{ .Values.tidb.replicas }} image: {{ .Values.tidb.image }} diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml index 8b1ac22d31..8ca19f8460 100644 --- a/charts/tidb-cluster/values.yaml +++ b/charts/tidb-cluster/values.yaml @@ -241,17 +241,6 @@ tikv: # effect: "NoSchedule" annotations: {} - ## storeLabels is used to define store label keys - ## The label keys specified the location of a store. - ## In order to use the location awareness feature of TiKV, users have to label their k8s nodes with the same labels. - ## Note: current can not support these labels contains "/" - ## The placement priorities is implied by the order of label keys. - ## For example, ["zone", "rack"] means that we should place replicas to - ## different zones first, then to different racks if we don't have enough zones. 
- ## default value is ["region", "zone", "rack", "host"]
- ## If you change the default value below, please do sync the change to pd.config.[replication].location-labels
- ## storeLabels: ["region", "zone", "rack", "host"]
-
 tidb:
   # Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
   # tidb configurations(change to the tags of your tidb version),
diff --git a/pkg/apis/pingcap.com/v1alpha1/types.go b/pkg/apis/pingcap.com/v1alpha1/types.go
index 402ceb449b..a539b4c123 100644
--- a/pkg/apis/pingcap.com/v1alpha1/types.go
+++ b/pkg/apis/pingcap.com/v1alpha1/types.go
@@ -144,7 +144,6 @@ type TiKVSpec struct {
 StorageClassName string              `json:"storageClassName,omitempty"`
 Tolerations      []corev1.Toleration `json:"tolerations,omitempty"`
 Annotations      map[string]string   `json:"annotations,omitempty"`
-StoreLabels      []string            `json:"storeLabels,omitempty"`
 }

 // TiKVPromGatewaySpec runs as a sidecar with TiKVSpec
diff --git a/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
index df77c36db6..1bf3b6d605 100644
--- a/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap.com/v1alpha1/zz_generated.deepcopy.go
@@ -388,11 +388,6 @@ func (in *TiKVSpec) DeepCopyInto(out *TiKVSpec) {
 (*out)[key] = val
 }
 }
-if in.StoreLabels != nil {
-	in, out := &in.StoreLabels, &out.StoreLabels
-	*out = make([]string, len(*in))
-	copy(*out, *in)
-}
 return
 }

diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index 114f159359..8b7c50dbe9 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -524,6 +524,16 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
 return setCount, err
 }

+	config, err := pdCli.GetConfig()
+	if err != nil {
+		return setCount, err
+	}
+
+	locationLabels := []string(config.Replication.LocationLabels)
+	if locationLabels == nil {
+		return setCount, nil
+	}
+
 for _, store := range storesInfo.Stores {
 status := tkmm.getTiKVStore(store)
 if status == nil {
@@ -537,7 +547,7 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
 }

 nodeName := pod.Spec.NodeName
-ls, err := tkmm.getNodeLabels(nodeName, tc.Spec.TiKV.StoreLabels)
+ls, err := tkmm.getNodeLabels(nodeName, locationLabels)
 if err != nil || len(ls) == 0 {
 glog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName)
 continue
diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go
index 0e2afa6d9d..798fde3113 100644
--- a/pkg/manager/member/tikv_member_manager_test.go
+++ b/pkg/manager/member/tikv_member_manager_test.go
@@ -21,6 +21,8 @@ import (
 . "github.com/onsi/gomega"
 "github.com/pingcap/kvproto/pkg/metapb"
+	"github.com/pingcap/pd/pkg/typeutil"
+	"github.com/pingcap/pd/server"
 "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
 "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake"
 informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
@@ -72,7 +74,13 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) {
 }

 tkmm, fakeSetControl, fakeSvcControl, pdClient, _, _ := newFakeTiKVMemberManager(tc)
-
+	pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
+		return &server.Config{
+			Replication: server.ReplicationConfig{
+				LocationLabels: typeutil.StringSlice{"region", "zone", "rack", "host"},
+			},
+		}, nil
+	})
 if test.errWhenGetStores {
 pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) {
 return nil, fmt.Errorf("failed to get stores from tikv cluster")
@@ -221,6 +229,13 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) {
 tcName := tc.Name

 tkmm, fakeSetControl, fakeSvcControl, pdClient, _, _ := newFakeTiKVMemberManager(tc)
+	pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
+		return &server.Config{
+			Replication: server.ReplicationConfig{
+				LocationLabels: typeutil.StringSlice{"region", "zone", "rack", "host"},
+			},
+		}, nil
+	})
 if test.errWhenGetStores {
 pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) {
 return nil, fmt.Errorf("failed to get stores from pd cluster")
@@ -489,9 +504,14 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) {
 }
 testFn := func(test *testcase, t *testing.T) {
 tc := newTidbClusterForPD()
-tc.Spec.TiKV.StoreLabels = []string{"region", "zone", "rack"}
 pmm, _, _, pdClient, podIndexer, nodeIndexer := newFakeTiKVMemberManager(tc)
-
+		pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
+			return &server.Config{
+				Replication: server.ReplicationConfig{
+					LocationLabels: typeutil.StringSlice{"region", "zone", "rack", "host"},
+				},
+			}, nil
+		})
 if test.errWhenGetStores {
 pdClient.AddReaction(pdapi.GetStoresActionType, func(action *pdapi.Action) (interface{}, error) {
 return nil, fmt.Errorf("failed to get stores")
diff --git a/tests/cluster_info.go b/tests/cluster_info.go
index 68ed997000..0caea9decd 100644
--- a/tests/cluster_info.go
+++ b/tests/cluster_info.go
@@ -100,7 +100,7 @@ func (tc *TidbClusterConfig) BuildSubValues(path string) (string, error) {
 fmt.Sprintf(`level = "%s"`, pdLogLevel),
 "[replication]",
 fmt.Sprintf("max-replicas = %d", pdMaxReplicas),
-`location-labels = ["region", "zone", "rack", "host"]`,
+		fmt.Sprintf(`location-labels = ["%s"]`, tc.TopologyKey),
 }
 tikvConfig := []string{
 "[log]",
@@ -113,7 +113,7 @@ func (tc *TidbClusterConfig) BuildSubValues(path string) (string, error) {
 "[log]",
 `level = "info"`,
 }
-subValues := GetAffinityConfigOrDie(tc.ClusterName, tc.Namespace, tc.TopologyKey, []string{tc.TopologyKey}, pdConfig, tikvConfig, tidbConfig)
+	subValues := GetSubValuesOrDie(tc.ClusterName, tc.Namespace, tc.TopologyKey, pdConfig, tikvConfig, tidbConfig)
 subVaulesPath := fmt.Sprintf("%s/%s.yaml", path, tc.ClusterName)
 _, err := os.Stat(subVaulesPath)
 if err != nil {
diff --git a/tests/util.go b/tests/util.go
index 81a5ec7efa..d47233b5c0 100644
--- a/tests/util.go
+++ b/tests/util.go
@@ -89,12 +89,6 @@ func GetPodsByLabels(kubeCli kubernetes.Interface, node string, lables map[strin
 }

 var affinityTemp string = `{{.Kind}}:
-{{ $length := len .StoreLabels}} {{ if or (not .StoreLabels) (eq $length 0)}}
-{{else if eq .Kind "tikv"}}
-  storeLabels:
-{{range .StoreLabels}}
-  - {{.}}
-{{end}}
-{{end}}
 config: |
{{range .Config}} {{.}}
{{end}}
@@ -118,28 +112,27 @@ type AffinityInfo struct {
 Weight      int
 Namespace   string
 TopologyKey string
-StoreLabels []string
 Config      []string
 }

-func GetAffinityConfigOrDie(clusterName, namespace, topologyKey string, storeLabels []string, pdConfig []string, tikvConfig []string, tidbConfig []string) string {
+func GetSubValuesOrDie(clusterName, namespace, topologyKey string, pdConfig []string, tikvConfig []string, tidbConfig []string) string {
 temp, err := template.New("dt-affinity").Parse(affinityTemp)
 if err != nil {
 slack.NotifyAndPanic(err)
 }

 pdbuff := new(bytes.Buffer)
-err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels, Config: pdConfig})
+	err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, Config: pdConfig})
 if err != nil {
 slack.NotifyAndPanic(err)
 }
 tikvbuff := new(bytes.Buffer)
-err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels, Config: tikvConfig})
+	err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, Config: tikvConfig})
 if err != nil {
 slack.NotifyAndPanic(err)
 }
 tidbbuff := new(bytes.Buffer)
-err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, StoreLabels: storeLabels, Config: tidbConfig})
+	err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 50, Namespace: namespace, TopologyKey: topologyKey, Config: tidbConfig})
 if err != nil {
 slack.NotifyAndPanic(err)
 }
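
Reviewer note: the net effect of this patch is that the TiKV member manager no longer reads store-location label keys from tc.Spec.TiKV.StoreLabels; it asks PD for its [replication] location-labels setting and treats that as the single source of truth when copying Kubernetes node labels onto TiKV stores. The sketch below is a minimal, self-contained illustration of that flow, not the operator's actual code: PDClient, fakePD, and storeLabelsFromPD are simplified stand-ins for pdapi.PDClient and the logic added to setStoreLabelsForTiKV above.

package main

import "fmt"

// ReplicationConfig mirrors the PD [replication] section; only the
// field this sketch needs is included.
type ReplicationConfig struct {
	LocationLabels []string
}

// Config is a trimmed-down stand-in for PD's server.Config.
type Config struct {
	Replication ReplicationConfig
}

// PDClient is a simplified stand-in for the operator's pdapi.PDClient.
type PDClient interface {
	GetConfig() (*Config, error)
}

// fakePD returns a canned config, like the test reactions added above.
type fakePD struct{ cfg Config }

func (p *fakePD) GetConfig() (*Config, error) { return &p.cfg, nil }

// storeLabelsFromPD returns the node-label keys to copy onto each TiKV
// store. After this patch they come from PD's location-labels instead
// of the removed tc.Spec.TiKV.StoreLabels field; a nil result means the
// location-awareness feature is effectively disabled.
func storeLabelsFromPD(pdCli PDClient) ([]string, error) {
	config, err := pdCli.GetConfig()
	if err != nil {
		return nil, err
	}
	return config.Replication.LocationLabels, nil
}

func main() {
	pd := &fakePD{cfg: Config{Replication: ReplicationConfig{
		LocationLabels: []string{"region", "zone", "rack", "host"},
	}}}
	labels, err := storeLabelsFromPD(pd)
	if err != nil {
		panic(err)
	}
	// The operator would filter each node's labels down to these keys
	// before attaching them to the store via PD's SetStoreLabels API.
	fmt.Println(labels) // [region zone rack host]
}

Centralizing the keys in PD's config also removes the failure mode the deleted values.yaml comment warned about: users no longer have to keep tikv.storeLabels and pd.config's [replication] location-labels manually in sync.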