diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c38b4ecbf3..61455249865 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -284,7 +284,7 @@ The `monitor` section in new `values.yaml`:
 monitor:
   create: true
   initializer:
-    image: pingcap/tidb-monitor-initializer:v3.0.1
+    image: pingcap/tidb-monitor-initializer:v3.0.5
     imagePullPolicy: IfNotPresent
   reloader:
     create: true
diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml
index 166b0407dd4..6799012e8b4 100644
--- a/charts/tidb-cluster/values.yaml
+++ b/charts/tidb-cluster/values.yaml
@@ -74,7 +74,7 @@ pd:
     location-labels = ["region", "zone", "rack", "host"]
 
   replicas: 3
-  image: pingcap/pd:v3.0.4
+  image: pingcap/pd:v3.0.5
   # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
   # different classes might map to quality-of-service levels, or to backup policies,
   # or to arbitrary policies determined by the cluster administrators.
@@ -171,7 +171,8 @@ pd:
 
   # Specify the security context of PD Pod.
   # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  podSecurityContext: {}
+  podSecurityContext:
+    {}
   # sysctls:
   # # You can enable these kernel parameters tuning to improve TiDB performance,
   # # when the kubelet is configured to allow unsafe sysctls
@@ -221,7 +222,7 @@ tikv:
   # we can only set capacity in tikv.resources.limits.storage.
 
   replicas: 3
-  image: pingcap/tikv:v3.0.4
+  image: pingcap/tikv:v3.0.5
   # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
   # different classes might map to quality-of-service levels, or to backup policies,
   # or to arbitrary policies determined by the cluster administrators.
@@ -265,7 +266,8 @@ tikv:
 
   # Specify the security context of TiKV Pod.
   # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  podSecurityContext: {}
+  podSecurityContext:
+    {}
   # sysctls:
   # # You can enable these kernel parameters tuning to improve TiDB performance,
   # # when the kubelet is configured to allow unsafe sysctls
@@ -311,7 +313,7 @@ tidb:
   # initSqlConfigMapName: tidb-initsql
   # initSql: |-
   #   create database app;
-  image: pingcap/tidb:v3.0.4
+  image: pingcap/tidb:v3.0.5
   # Image pull policy.
   imagePullPolicy: IfNotPresent
 
@@ -323,7 +325,6 @@ tidb:
   #   cpu: 12000m
   #   memory: 12Gi
 
-
   ## affinity defines tikv scheduling rules,affinity default settings is empty.
   ## please read the affinity document before set your scheduling rule:
   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
@@ -348,13 +349,13 @@ tidb:
 
   # Specify the security context of TiDB Pod.
   # refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
-  podSecurityContext: {}
+  podSecurityContext:
+    {}
   # sysctls:
   # # You can enable these kernel parameters tuning to improve TiDB performance,
   # # when the kubelet is configured to allow unsafe sysctls
   # - name: net.core.somaxconn
   #   value: "32768"
-
   # # Load balancers usually have an idle timeout (eg. AWS NLB idle timeout is 350),
   # # the tcp_keepalive_time must be set to lower than LB idle timeout.
   # - name: net.ipv4.tcp_keepalive_time
@@ -362,7 +363,6 @@ tidb:
   # - name: net.ipv4.tcp_keepalive_intvl
   #   value: "75"
 
-
   # Specify the priorityClassName for TiDB Pod.
   # refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
   priorityClassName: ""
@@ -372,7 +372,7 @@ tidb:
     type: NodePort
     exposeStatus: true
     # annotations:
-    #   cloud.google.com/load-balancer-type: Internal
+    #   cloud.google.com/load-balancer-type: Internal
   separateSlowLog: true
   slowLogTailer:
     image: busybox:1.26.2
@@ -424,7 +424,7 @@ monitor:
     storageClassName: local-storage
     storage: 10Gi
   initializer:
-    image: pingcap/tidb-monitor-initializer:v3.0.4
+    image: pingcap/tidb-monitor-initializer:v3.0.5
     imagePullPolicy: IfNotPresent
     config:
       K8S_PROMETHEUS_URL: http://prometheus-k8s.monitoring.svc:9090
@@ -441,7 +441,8 @@ monitor:
     imagePullPolicy: IfNotPresent
     service:
       type: NodePort
-    resources: {}
+    resources:
+      {}
     # limits:
     #   cpu: 50m
     #   memory: 64Mi
@@ -488,7 +489,8 @@ monitor:
       type: NodePort
     reserveDays: 12
     # alertmanagerURL: ""
-  nodeSelector: {}
+  nodeSelector:
+    {}
   # kind: monitor
   # zone: cn-bj1-01,cn-bj1-02
   # region: cn-bj1
@@ -502,7 +504,7 @@ binlog:
   pump:
     create: false
     replicas: 1
-    image: pingcap/tidb-binlog:v3.0.4
+    image: pingcap/tidb-binlog:v3.0.5
     imagePullPolicy: IfNotPresent
     logLevel: info
     # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -544,7 +546,7 @@ binlog:
 
   drainer:
     create: false
-    image: pingcap/tidb-binlog:v3.0.4
+    image: pingcap/tidb-binlog:v3.0.5
     imagePullPolicy: IfNotPresent
     logLevel: info
     # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -579,7 +581,8 @@ binlog:
     # downstream storage, equal to --dest-db-type
     # valid values are "mysql", "file", "tidb", "kafka"
    destDBType: file
-    mysql: {}
+    mysql:
+      {}
     # host: "127.0.0.1"
     # user: "root"
     # password: ""
@@ -587,7 +590,8 @@ binlog:
     # # Time and size limits for flash batch write
     # timeLimit: "30s"
     # sizeLimit: "100000"
-    kafka: {}
+    kafka:
+      {}
     # only need config one of zookeeper-addrs and kafka-addrs, will get kafka address if zookeeper-addrs is configed.
     # zookeeperAddrs: "127.0.0.1:2181"
     # kafkaAddrs: "127.0.0.1:9092"
@@ -702,7 +706,8 @@ importer:
   imagePullPolicy: IfNotPresent
   storageClassName: local-storage
   storage: 200Gi
-  resources: {}
+  resources:
+    {}
   # limits:
   #   cpu: 16000m
   #   memory: 8Gi
diff --git a/deploy/aliyun/variables.tf b/deploy/aliyun/variables.tf
index 5920d252e2e..852a3ca3b4c 100644
--- a/deploy/aliyun/variables.tf
+++ b/deploy/aliyun/variables.tf
@@ -36,7 +36,7 @@ variable "cluster_name" {
 variable "tidb_version" {
   description = "TiDB cluster version"
-  default     = "v3.0.4"
+  default     = "v3.0.5"
 }
 
 variable "tidb_cluster_chart_version" {
   description = "tidb-cluster chart version"
diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf
index 101406a8b8b..1da87b04974 100644
--- a/deploy/aws/clusters.tf
+++ b/deploy/aws/clusters.tf
@@ -24,7 +24,7 @@ provider "helm" {
 # #
 # # NOTE: cluster_name cannot be changed after creation
 #   cluster_name = "demo-cluster"
-#   cluster_version = "v3.0.4"
+#   cluster_version = "v3.0.5"
 #   ssh_key_name = module.key-pair.key_name
 #   pd_count = 1
 #   pd_instance_type = "t2.xlarge"
diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf
index 6f7f1756714..95c0195113d 100644
--- a/deploy/aws/variables.tf
+++ b/deploy/aws/variables.tf
@@ -80,7 +80,7 @@ variable "bastion_instance_type" {
 
 # For aws tutorials compatiablity
 variable "default_cluster_version" {
-  default = "v3.0.4"
+  default = "v3.0.5"
 }
 
 variable "default_cluster_pd_count" {
diff --git a/deploy/gcp/variables.tf b/deploy/gcp/variables.tf
index 8391abca2da..848980765b9 100644
--- a/deploy/gcp/variables.tf
+++ b/deploy/gcp/variables.tf
@@ -24,7 +24,7 @@ variable "node_locations" {
 
 variable "tidb_version" {
   description = "TiDB version"
-  default     = "v3.0.4"
+  default     = "v3.0.5"
 }
 
 variable "tidb_operator_version" {
diff --git a/deploy/modules/aws/tidb-cluster/variables.tf b/deploy/modules/aws/tidb-cluster/variables.tf
index 7730c4351c5..82846cd87c7 100644
--- a/deploy/modules/aws/tidb-cluster/variables.tf
+++ b/deploy/modules/aws/tidb-cluster/variables.tf
@@ -19,7 +19,7 @@ variable "cluster_name" {
 
 variable "cluster_version" {
   type    = string
-  default = "v3.0.4"
+  default = "v3.0.5"
 }
 
 variable "ssh_key_name" {
diff --git a/deploy/modules/gcp/tidb-cluster/variables.tf b/deploy/modules/gcp/tidb-cluster/variables.tf
index 9f523b878c0..7e57619a1ff 100644
--- a/deploy/modules/gcp/tidb-cluster/variables.tf
+++ b/deploy/modules/gcp/tidb-cluster/variables.tf
@@ -9,7 +9,7 @@ variable "tidb_operator_id" {
 variable "cluster_name" {}
 variable "cluster_version" {
   description = "The TiDB cluster version"
-  default     = "v3.0.4"
+  default     = "v3.0.5"
 }
 variable "tidb_cluster_chart_version" {
   description = "The TiDB cluster chart version"
diff --git a/deploy/modules/share/tidb-cluster-release/variables.tf b/deploy/modules/share/tidb-cluster-release/variables.tf
index 3c9d6e1a8ae..8b74606a8bf 100644
--- a/deploy/modules/share/tidb-cluster-release/variables.tf
+++ b/deploy/modules/share/tidb-cluster-release/variables.tf
@@ -20,7 +20,7 @@ variable "cluster_name" {
 
 variable "cluster_version" {
   type    = string
-  default = "v3.0.4"
+  default = "v3.0.5"
 }
 
 variable "pd_count" {
diff --git a/images/tidb-operator-e2e/tidb-cluster-values.yaml b/images/tidb-operator-e2e/tidb-cluster-values.yaml
index d2908b71312..960c30a1e35 100644
--- a/images/tidb-operator-e2e/tidb-cluster-values.yaml
+++ b/images/tidb-operator-e2e/tidb-cluster-values.yaml
@@ -36,7 +36,7 @@ discovery:
 
 pd:
   replicas: 3
-  image: pingcap/pd:v3.0.4
+  image: pingcap/pd:v3.0.5
   imagePullPolicy: IfNotPresent
   logLevel: info
   # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -64,7 +64,8 @@ pd:
       storage: 1Gi
   # nodeSelector is used for scheduling pod,
   # if nodeSelectorRequired is true, all the following labels must be matched
-  nodeSelector: {}
+  nodeSelector:
+    {}
   # kind: pd
   # # zone is comma separated availability zone list
   # zone: cn-bj1-01,cn-bj1-02
@@ -81,7 +82,7 @@ pd:
 
 tikv:
   replicas: 3
-  image: pingcap/tikv:v3.0.4
+  image: pingcap/tikv:v3.0.5
   imagePullPolicy: IfNotPresent
   logLevel: info
   # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -102,7 +103,8 @@ tikv:
     #   cpu: 12000m
     #   memory: 24Gi
       storage: 10Gi
-  nodeSelector: {}
+  nodeSelector:
+    {}
   # kind: tikv
   # zone: cn-bj1-01,cn-bj1-02
   # region: cn-bj1
@@ -134,7 +136,7 @@ tidb:
   # initSql is the SQL statements executed after the TiDB cluster is bootstrapped.
   # initSql: |-
   #   create database app;
-  image: pingcap/tidb:v3.0.4
+  image: pingcap/tidb:v3.0.5
   imagePullPolicy: IfNotPresent
   logLevel: info
   resources:
@@ -144,7 +146,8 @@ tidb:
     requests: {}
     #   cpu: 12000m
     #   memory: 12Gi
-  nodeSelector: {}
+  nodeSelector:
+    {}
   # kind: tidb
   # zone: cn-bj1-01,cn-bj1-02
   # region: cn-bj1
@@ -159,7 +162,7 @@ tidb:
     type: NodePort
     exposeStatus: true
     # annotations:
-    #   cloud.google.com/load-balancer-type: Internal
+    #   cloud.google.com/load-balancer-type: Internal
 
   # mysqlClient is used to set password for TiDB
   # it must has Python MySQL client installed
@@ -210,7 +213,8 @@ monitor:
       type: NodePort
     reserveDays: 12
     # alertmanagerURL: ""
-  nodeSelector: {}
+  nodeSelector:
+    {}
   # kind: monitor
   # zone: cn-bj1-01,cn-bj1-02
   # region: cn-bj1
@@ -222,7 +226,7 @@ monitor:
 
 fullbackup:
   create: false
-  binlogImage: pingcap/tidb-binlog:v3.0.4
+  binlogImage: pingcap/tidb-binlog:v3.0.5
   binlogImagePullPolicy: IfNotPresent
   # https://github.com/tennix/tidb-cloud-backup
   mydumperImage: pingcap/tidb-cloud-backup:20190610
@@ -248,14 +252,16 @@ fullbackup:
 
   user: backup
   password: "Password here ..."
   # backup to gcp
-  gcp: {}
+  gcp:
+    {}
   # base64 format of the service account json file data
   # https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually
   # and then: cat credentials.json | base64 | tr -d '\n'
   # credentialsData: ""
   # bucket: ""
   # backup to ceph object storage
-  ceph: {}
+  ceph:
+    {}
   # endpoint: ""
   # bucket: ""
@@ -263,7 +269,7 @@ binlog:
   pump:
     create: false
     replicas: 1
-    image: pingcap/tidb-binlog:v3.0.4
+    image: pingcap/tidb-binlog:v3.0.5
     imagePullPolicy: IfNotPresent
     logLevel: info
     # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -280,7 +286,7 @@ binlog:
 
   drainer:
     create: false
-    image: pingcap/tidb-binlog:v3.0.4
+    image: pingcap/tidb-binlog:v3.0.5
     imagePullPolicy: IfNotPresent
     logLevel: info
     # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -308,7 +314,8 @@ binlog:
     # downstream storage, equal to --dest-db-type
     # valid values are "mysql", "pb", "kafka"
     destDBType: pb
-    mysql: {}
+    mysql:
+      {}
     # host: "127.0.0.1"
     # user: "root"
     # password: ""
@@ -316,7 +323,8 @@ binlog:
     # # Time and size limits for flash batch write
     # timeLimit: "30s"
     # sizeLimit: "100000"
-    kafka: {}
+    kafka:
+      {}
     # only need config one of zookeeper-addrs and kafka-addrs, will get kafka address if zookeeper-addrs is configed.
     # zookeeperAddrs: "127.0.0.1:2181"
     # kafkaAddrs: "127.0.0.1:9092"
diff --git a/tests/config.go b/tests/config.go
index efc9cf52f42..63804e3d6a9 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -90,8 +90,8 @@ func NewConfig() (*Config, error) {
 	flag.StringVar(&cfg.configFile, "config", "", "Config file")
 	flag.StringVar(&cfg.LogDir, "log-dir", "/logDir", "log directory")
 	flag.IntVar(&cfg.FaultTriggerPort, "fault-trigger-port", 23332, "the http port of fault trigger service")
-	flag.StringVar(&cfg.TidbVersions, "tidb-versions", "v3.0.2,v3.0.3,v3.0.4", "tidb versions")
 	flag.StringVar(&cfg.TestApiserverImage, "test-apiserver-image", "pingcap/test-apiserver:latest", "test-apiserver image")
+	flag.StringVar(&cfg.TidbVersions, "tidb-versions", "v3.0.2,v3.0.3,v3.0.4,v3.0.5", "tidb versions")
 	flag.StringVar(&cfg.OperatorTag, "operator-tag", "master", "operator tag used to choose charts")
 	flag.StringVar(&cfg.OperatorImage, "operator-image", "pingcap/tidb-operator:latest", "operator image")
 	flag.StringVar(&cfg.UpgradeOperatorTag, "upgrade-operator-tag", "", "upgrade operator tag used to choose charts")
diff --git a/tests/manifests/e2e/e2e.yaml b/tests/manifests/e2e/e2e.yaml
index 6a917a85485..d95cb2fc677 100644
--- a/tests/manifests/e2e/e2e.yaml
+++ b/tests/manifests/e2e/e2e.yaml
@@ -7,8 +7,8 @@ metadata:
     app: webhook-service
 spec:
   ports:
-  - port: 443
-    targetPort: 443
+    - port: 443
+      targetPort: 443
   selector:
     app: webhook
 ---
@@ -17,9 +17,9 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: tidb-operator-e2e
 subjects:
-- kind: ServiceAccount
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e
+  - kind: ServiceAccount
+    namespace: tidb-operator-e2e
+    name: tidb-operator-e2e
 roleRef:
   kind: ClusterRole
   name: cluster-admin
@@ -41,28 +41,27 @@ metadata:
 spec:
   serviceAccount: tidb-operator-e2e
   containers:
-  - name: tidb-operator-e2e
-    image: localhost:5000/pingcap/tidb-operator-e2e:latest
-    imagePullPolicy: Always
-    command:
-    - /usr/local/bin/e2e
-    - --operator-tag=e2e
-    - --operator-image=localhost:5000/pingcap/tidb-operator:latest
-    - --test-apiserver-image=localhost:5000/pingcap/test-apiserver:latest
-    - --tidb-versions=v3.0.2,v3.0.3,v3.0.4
-    - --chart-dir=/charts
-    - -v=4
-    volumeMounts:
-    - mountPath: /logDir
-      name: logdir
-    env:
-    - name: NAMESPACE
-      valueFrom:
-        fieldRef:
-          fieldPath: metadata.namespace
+    - name: tidb-operator-e2e
+      image: localhost:5000/pingcap/tidb-operator-e2e:latest
+      imagePullPolicy: Always
+      command:
+        - /usr/local/bin/e2e
+        - --operator-tag=e2e
+        - --operator-image=pingcap/tidb-operator:latest
+        - --tidb-versions=v3.0.2,v3.0.3,v3.0.4,v3.0.5
+        - --chart-dir=/charts
+        - -v=4
+      volumeMounts:
+        - mountPath: /logDir
+          name: logdir
+      env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
   volumes:
-  - name: logdir
-    hostPath:
-      path: /var/log
-      type: Directory
+    - name: logdir
+      hostPath:
+        path: /var/log
+        type: Directory
   restartPolicy: Never
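
Note (not part of the diff): tests/config.go above registers --tidb-versions as a comma-separated list, and this PR appends v3.0.5 to it in both the flag default and the e2e manifest. The Go sketch below is a hedged illustration only — the pairwise walk is an assumption, not the actual e2e harness code — of why appending one version to the list adds exactly one more upgrade step for the tests to exercise.

package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	// Same flag name and default as the updated tests/config.go.
	tidbVersions := flag.String("tidb-versions", "v3.0.2,v3.0.3,v3.0.4,v3.0.5", "tidb versions")
	flag.Parse()

	// Split the comma-separated list and walk it pairwise: each adjacent
	// pair is one upgrade to exercise, e.g. v3.0.4 -> v3.0.5.
	versions := strings.Split(*tidbVersions, ",")
	for i := 0; i+1 < len(versions); i++ {
		fmt.Printf("upgrade step %d: %s -> %s\n", i+1, versions[i], versions[i+1])
	}
}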