From aa621769a80bd662fa55574eb6258299067361c5 Mon Sep 17 00:00:00 2001 From: Aylei Date: Wed, 17 Jul 2019 12:22:39 +0800 Subject: [PATCH] Modularization for AWS terraform scripts (#650) * Small fixes for terraform aws - fix hardcode chart version - fix unreachable bastion - adjust tidb-cluster no-wait to avoid scheduled backup pvc blocking - add version check - disable pd node public_ip Signed-off-by: Aylei * Modularization for AWS terraform scripts Signed-off-by: Aylei * Refine README of AWS terraform Signed-off-by: Aylei * Fix bastion ssh Signed-off-by: Aylei * Rephrase section title Signed-off-by: Aylei * Update deploy/modules/aws/tidb-operator/README.md Co-Authored-By: Tennix * Address review comments Signed-off-by: Aylei --- deploy/.gitignore | 6 + deploy/aws/README.md | 143 ++++++++++++++-- deploy/aws/bastion.tf | 36 ----- deploy/aws/clusters.tf | 6 +- deploy/aws/main.tf | 57 ++++--- deploy/aws/outputs.tf | 6 +- deploy/aws/tidb-cluster/outputs.tf | 7 - deploy/aws/variables.tf | 2 +- .../aws/bastion}/bastion-userdata | 0 deploy/modules/aws/bastion/bastion.tf | 43 +++++ deploy/{aws => modules/aws/bastion}/data.tf | 3 - deploy/modules/aws/bastion/outputs.tf | 4 + deploy/modules/aws/bastion/variables.tf | 38 +++++ .../aws/key-pair}/main.tf | 0 .../aws/key-pair}/outputs.tf | 0 .../aws/key-pair}/variables.tf | 0 .../aws/key-pair}/versions.tf | 0 .../{ => modules}/aws/tidb-cluster/README.md | 0 deploy/modules/aws/tidb-cluster/cluster.tf | 14 ++ deploy/{ => modules}/aws/tidb-cluster/data.tf | 12 -- .../{ => modules}/aws/tidb-cluster/local.tf | 0 deploy/modules/aws/tidb-cluster/outputs.tf | 7 + .../aws/tidb-cluster/pre_userdata | 0 .../tidb-cluster/templates/userdata.sh.tpl | 0 .../aws/tidb-cluster/values/default.yaml | 0 .../aws/tidb-cluster/variables.tf | 153 ++++++++---------- .../{ => modules}/aws/tidb-cluster/workers.tf | 0 .../tidb-cluster/workers_launch_template.tf | 0 .../{ => modules}/aws/tidb-operator/README.md | 4 +- .../{ => modules}/aws/tidb-operator/main.tf | 8 +- .../manifests/gp2-storageclass.yaml | 0 .../manifests/local-volume-provisioner.yaml | 0 .../aws/tidb-operator/outputs.tf | 0 .../aws/tidb-operator/variables.tf | 0 deploy/modules/aws/vpc/main.tf | 27 ++++ deploy/modules/aws/vpc/outputs.tf | 11 ++ deploy/modules/aws/vpc/variables.tf | 31 ++++ .../share/tidb-cluster-release/data.tf | 11 ++ .../share/tidb-cluster-release/main.tf} | 9 +- .../share/tidb-cluster-release/outputs.tf | 7 + .../share/tidb-cluster-release/variables.tf | 50 ++++++ 41 files changed, 494 insertions(+), 201 deletions(-) create mode 100644 deploy/.gitignore delete mode 100644 deploy/aws/bastion.tf delete mode 100644 deploy/aws/tidb-cluster/outputs.tf rename deploy/{aws => modules/aws/bastion}/bastion-userdata (100%) create mode 100644 deploy/modules/aws/bastion/bastion.tf rename deploy/{aws => modules/aws/bastion}/data.tf (77%) create mode 100644 deploy/modules/aws/bastion/outputs.tf create mode 100644 deploy/modules/aws/bastion/variables.tf rename deploy/{aws/aws-key-pair => modules/aws/key-pair}/main.tf (100%) rename deploy/{aws/aws-key-pair => modules/aws/key-pair}/outputs.tf (100%) rename deploy/{aws/aws-key-pair => modules/aws/key-pair}/variables.tf (100%) rename deploy/{aws/aws-key-pair => modules/aws/key-pair}/versions.tf (100%) rename deploy/{ => modules}/aws/tidb-cluster/README.md (100%) create mode 100644 deploy/modules/aws/tidb-cluster/cluster.tf rename deploy/{ => modules}/aws/tidb-cluster/data.tf (80%) rename deploy/{ => modules}/aws/tidb-cluster/local.tf (100%) create mode 100644 
deploy/modules/aws/tidb-cluster/outputs.tf rename deploy/{ => modules}/aws/tidb-cluster/pre_userdata (100%) rename deploy/{ => modules}/aws/tidb-cluster/templates/userdata.sh.tpl (100%) rename deploy/{ => modules}/aws/tidb-cluster/values/default.yaml (100%) rename deploy/{ => modules}/aws/tidb-cluster/variables.tf (83%) rename deploy/{ => modules}/aws/tidb-cluster/workers.tf (100%) rename deploy/{ => modules}/aws/tidb-cluster/workers_launch_template.tf (100%) rename deploy/{ => modules}/aws/tidb-operator/README.md (75%) rename deploy/{ => modules}/aws/tidb-operator/main.tf (91%) rename deploy/{ => modules}/aws/tidb-operator/manifests/gp2-storageclass.yaml (100%) rename deploy/{ => modules}/aws/tidb-operator/manifests/local-volume-provisioner.yaml (100%) rename deploy/{ => modules}/aws/tidb-operator/outputs.tf (100%) rename deploy/{ => modules}/aws/tidb-operator/variables.tf (100%) create mode 100644 deploy/modules/aws/vpc/main.tf create mode 100644 deploy/modules/aws/vpc/outputs.tf create mode 100644 deploy/modules/aws/vpc/variables.tf create mode 100644 deploy/modules/share/tidb-cluster-release/data.tf rename deploy/{aws/tidb-cluster/cluster.tf => modules/share/tidb-cluster-release/main.tf} (93%) create mode 100644 deploy/modules/share/tidb-cluster-release/outputs.tf create mode 100644 deploy/modules/share/tidb-cluster-release/variables.tf diff --git a/deploy/.gitignore b/deploy/.gitignore new file mode 100644 index 0000000000..908ff97a6b --- /dev/null +++ b/deploy/.gitignore @@ -0,0 +1,6 @@ +.terraform/ +credentials/ +terraform.tfstate +terraform.tfstate.backup +.terraform.tfstate.lock.info +kubeconfig_*.yaml diff --git a/deploy/aws/README.md b/deploy/aws/README.md index db42c615cb..1f26aeee7f 100644 --- a/deploy/aws/README.md +++ b/deploy/aws/README.md @@ -187,9 +187,9 @@ module example-cluster { source = "./tidb-cluster" # The target EKS, required - eks_info = local.default_eks + eks_info = local.eks # The subnets of node pools of this TiDB cluster, required - subnets = local.default_subnets + subnets = local.subnets # TiDB cluster name, required cluster_name = "example-cluster" @@ -261,27 +261,144 @@ $ terraform destroy > > You have to manually delete the EBS volumes in AWS console after running terraform destroy if you do not need the data on the volumes anymore. -## Advanced Guide: Use the tidb-cluster and tidb-operator Modules +## Multiple Kubernetes Management -Under the hood, this terraform module composes two sub-modules: +In this section, we will investigate the best practice to manage multiple Kubernetes clusters, each with one or more TiDB clusters installed. -- [tidb-operator](./tidb-operator/README.md), which provisions the Kubernetes control plane for TiDB cluster -- [tidb-cluster](./tidb-cluster/README.md), which provisions a TiDB cluster in the target Kubernetes cluster +Under the hood, this terraform module composes several sub-modules: -You can use these modules separately in your own terraform scripts, by either referencing these modules locally or publish these modules to your terraform module registry. 
+- [tidb-operator](../modules/aws/tidb-operator/README.md), which provisions the Kubernetes control plane for TiDB clusters +- [tidb-cluster](../modules/aws/tidb-cluster/README.md), which provisions a TiDB cluster in the target Kubernetes cluster +- ...and a `VPC` module, a `bastion` module and a `key-pair` module that are dedicated to TiDB on AWS -For example, let's say you create a terraform module in `/deploy/aws/staging`, you can reference the tidb-operator and tidb-cluster modules as following: +The best practice is to create a new directory for each of your Kubernetes clusters and compose these modules via terraform scripts, so that the terraform state and cluster credentials of each cluster stay isolated. Here's an example: + +```shell +# assume we are in the project root +$ mkdir -p deploy/aws-staging +$ vim deploy/aws-staging/main.tf +``` + +The content of `deploy/aws-staging/main.tf` could be: ```hcl -module "setup-control-plane" { - source = "../tidb-operator" +provider "aws" { + region = "us-west-1" +} + +# create a key pair for ssh to the bastion, and for ssh from the bastion to worker nodes +module "key-pair" { + source = "../modules/aws/key-pair" + + name = "another-eks-cluster" + path = "${path.cwd}/credentials/" +} + +# provision a VPC +module "vpc" { + source = "../modules/aws/vpc" + + vpc_name = "another-eks-cluster" +} + +# provision an EKS control plane with tidb-operator installed +module "tidb-operator" { + source = "../modules/aws/tidb-operator" + + eks_name = "another-eks-cluster" + config_output_path = "credentials/" + subnets = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + ssh_key_name = module.key-pair.key_name +} + +# HACK: make the helm provider depend on the EKS cluster +resource "local_file" "kubeconfig" { + depends_on = [module.tidb-operator.eks] + sensitive_content = module.tidb-operator.eks.kubeconfig + filename = module.tidb-operator.eks.kubeconfig_filename +} +provider "helm" { + alias = "eks" + insecure = true + install_tiller = false + kubernetes { + config_path = local_file.kubeconfig.filename + } } +# provision a tidb-cluster in the eks cluster module "tidb-cluster-a" { - source = "../tidb-cluster" + source = "../modules/aws/tidb-cluster" + providers = { + helm = "helm.eks" + } + + cluster_name = "tidb-cluster-a" + eks = module.tidb-operator.eks + ssh_key_name = module.key-pair.key_name + subnets = module.vpc.private_subnets } +# provision another tidb-cluster in the eks cluster module "tidb-cluster-b" { - source = "../tidb-cluster" + source = "../modules/aws/tidb-cluster" + providers = { + helm = "helm.eks" + } + + cluster_name = "tidb-cluster-b" + eks = module.tidb-operator.eks + ssh_key_name = module.key-pair.key_name + subnets = module.vpc.private_subnets } -``` + +# provision a bastion machine to access the TiDB service and worker nodes +module "bastion" { + source = "../modules/aws/bastion" + + bastion_name = "another-eks-cluster-bastion" + key_name = module.key-pair.key_name + public_subnets = module.vpc.public_subnets + vpc_id = module.vpc.vpc_id + worker_security_group_id = module.tidb-operator.eks.worker_security_group_id + enable_ssh_to_workers = true +} + +# print the tidb hostname of tidb-cluster-a +output "cluster-a_tidb-dns" { + description = "tidb service endpoint" + value = module.tidb-cluster-a.tidb_hostname +} + +# print the monitor hostname of tidb-cluster-b +output "cluster-b_monitor-dns" { + description = "monitor service endpoint" + value = module.tidb-cluster-b.monitor_hostname +} + +output "bastion_ip" { + description = "Bastion IP
address" + value = module.bastion.bastion_ip +} +``` + +As shown in the code above, you can omit most of the parameters in each of the module calls because there are reasonable defaults, and it is easy to customize the setup: you just delete the bastion module call if you don't need it. + +To customize each fields, you can refer to this terraform module as a great example, also, you can always refer to the `variables.tf` of each of the modules to investigate all the available parameters. + +Also, it requires little effort if you want to integrate these modules into your own terraform codebase, and this is what these modules are designed for. + +> **Note:** +> +> If you create the new directory elsewhere, please take care of the relative path of modules. + +> **Note:** +> +> If you want to use these modules outside of the tidb-operator project, make sure you copy the whole `modules` directory and keep the relative path of each module inside the directory unchanged. + +> **Note:** +> +> The hack of helm provider is necessary in case of [hashicorp/terraform#2430](https://github.com/hashicorp/terraform/issues/2430#issuecomment-370685911), please keep it in your terraform scripts. + +If you are unwilling to touch the terraform code, copy this directory for each of your Kubernetes clusters also make sense. diff --git a/deploy/aws/bastion.tf b/deploy/aws/bastion.tf deleted file mode 100644 index 9ea19c8216..0000000000 --- a/deploy/aws/bastion.tf +++ /dev/null @@ -1,36 +0,0 @@ -resource "aws_security_group" "ssh" { - name = "${var.eks_name}-bastion" - description = "Allow SSH access for bastion instance" - vpc_id = var.create_vpc ? module.vpc.vpc_id : var.vpc_id - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = var.bastion_ingress_cidr - } - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -module "ec2" { - source = "terraform-aws-modules/ec2-instance/aws" - - version = "2.3.0" - name = "${var.eks_name}-bastion" - instance_count = var.create_bastion ? 1 : 0 - ami = data.aws_ami.amazon-linux-2.id - instance_type = var.bastion_instance_type - key_name = module.key-pair.key_name - associate_public_ip_address = true - monitoring = false - user_data = file("bastion-userdata") - vpc_security_group_ids = [aws_security_group.ssh.id] - subnet_ids = split( - ",", - var.create_vpc ? join(",", module.vpc.public_subnets) : join(",", var.subnets), - ) -} \ No newline at end of file diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf index fe9d851388..a9cf38f225 100644 --- a/deploy/aws/clusters.tf +++ b/deploy/aws/clusters.tf @@ -41,9 +41,9 @@ module "default-cluster" { providers = { helm = "helm.eks" } - source = "./tidb-cluster" - eks = local.default_eks - subnets = local.default_subnets + source = "../modules/aws/tidb-cluster" + eks = local.eks + subnets = local.subnets cluster_name = var.default_cluster_name cluster_version = var.default_cluster_version diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf index 014633de51..36ce5dd0ec 100644 --- a/deploy/aws/main.tf +++ b/deploy/aws/main.tf @@ -3,49 +3,46 @@ provider "aws" { } locals { - default_subnets = split(",", var.create_vpc ? 
join(",", module.vpc.private_subnets) : join(",", var.subnets)) - default_eks = module.tidb-operator.eks + eks = module.tidb-operator.eks + subnets = module.vpc.private_subnets } module "key-pair" { - source = "./aws-key-pair" - name = var.eks_name - path = "${path.module}/credentials/" + source = "../modules/aws/key-pair" + + name = var.eks_name + path = "${path.cwd}/credentials/" } module "vpc" { - source = "terraform-aws-modules/vpc/aws" - - version = "2.6.0" - name = var.eks_name - cidr = var.vpc_cidr - create_vpc = var.create_vpc - azs = data.aws_availability_zones.available.names - private_subnets = var.private_subnets - public_subnets = var.public_subnets - enable_nat_gateway = true - single_nat_gateway = true - - # The following tags are required for ELB - private_subnet_tags = { - "kubernetes.io/cluster/${var.eks_name}" = "shared" - } - public_subnet_tags = { - "kubernetes.io/cluster/${var.eks_name}" = "shared" - } - vpc_tags = { - "kubernetes.io/cluster/${var.eks_name}" = "shared" - } + source = "../modules/aws/vpc" + + vpc_name = var.eks_name + create_vpc = var.create_vpc + private_subnets = var.private_subnets + public_subnets = var.public_subnets + vpc_cidr = var.vpc_cidr } module "tidb-operator" { - source = "./tidb-operator" + source = "../modules/aws/tidb-operator" eks_name = var.eks_name eks_version = var.eks_version operator_version = var.operator_version config_output_path = "credentials/" - subnets = local.default_subnets - vpc_id = var.create_vpc ? module.vpc.vpc_id : var.vpc_id + subnets = local.subnets + vpc_id = module.vpc.vpc_id ssh_key_name = module.key-pair.key_name } + +module "bastion" { + source = "../modules/aws/bastion" + + bastion_name = "${var.eks_name}-bastion" + key_name = module.key-pair.key_name + public_subnets = module.vpc.public_subnets + vpc_id = module.vpc.vpc_id + worker_security_group_id = local.eks.worker_security_group_id + enable_ssh_to_workers = true +} diff --git a/deploy/aws/outputs.tf b/deploy/aws/outputs.tf index 182120d129..ac3451553d 100644 --- a/deploy/aws/outputs.tf +++ b/deploy/aws/outputs.tf @@ -20,15 +20,15 @@ output "kubeconfig_filename" { output "default-cluster_tidb-dns" { description = "tidb service endpoints" - value = module.default-cluster.tidb_dns + value = module.default-cluster.tidb_hostname } output "default-cluster_monitor-dns" { description = "tidb service endpoint" - value = module.default-cluster.monitor_dns + value = module.default-cluster.monitor_hostname } output "bastion_ip" { description = "Bastion IP address" - value = module.ec2.public_ip + value = module.bastion.bastion_ip } diff --git a/deploy/aws/tidb-cluster/outputs.tf b/deploy/aws/tidb-cluster/outputs.tf deleted file mode 100644 index 396e707f5c..0000000000 --- a/deploy/aws/tidb-cluster/outputs.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "tidb_dns" { - value = lookup(data.external.tidb_elb.result, "hostname", "empty") -} - -output "monitor_dns" { - value = lookup(data.external.monitor_elb.result, "hostname", "emtpy") -} diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf index d2b6e3aca9..760bb3fd79 100644 --- a/deploy/aws/variables.tf +++ b/deploy/aws/variables.tf @@ -23,7 +23,7 @@ variable "operator_version" { } variable "operator_values" { - description = "The helm values of TiDB Operator" + description = "The helm values of TiDB Operator, it is recommended to use the 'file()' function call to read the content from a local file, e.g. 
'file(\"my-cluster.yaml\")'" default = "" } diff --git a/deploy/aws/bastion-userdata b/deploy/modules/aws/bastion/bastion-userdata similarity index 100% rename from deploy/aws/bastion-userdata rename to deploy/modules/aws/bastion/bastion-userdata diff --git a/deploy/modules/aws/bastion/bastion.tf b/deploy/modules/aws/bastion/bastion.tf new file mode 100644 index 0000000000..0f9df0dab3 --- /dev/null +++ b/deploy/modules/aws/bastion/bastion.tf @@ -0,0 +1,43 @@ +resource "aws_security_group" "accept_ssh_from_local" { + name = var.bastion_name + description = "Allow SSH access for bastion instance" + vpc_id = var.vpc_id + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = var.bastion_ingress_cidr + } + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_security_group_rule" "enable_ssh_to_workers" { + count = var.enable_ssh_to_workers ? 1 : 0 + security_group_id = var.worker_security_group_id + source_security_group_id = aws_security_group.accept_ssh_from_local.id + from_port = 22 + to_port = 22 + protocol = "tcp" + type = "ingress" +} + +module "ec2" { + source = "terraform-aws-modules/ec2-instance/aws" + + version = "2.3.0" + name = var.bastion_name + instance_count = 1 + ami = data.aws_ami.amazon-linux-2.id + instance_type = var.bastion_instance_type + key_name = var.key_name + associate_public_ip_address = true + monitoring = false + user_data = file("${path.module}/bastion-userdata") + vpc_security_group_ids = [aws_security_group.accept_ssh_from_local.id] + subnet_ids = var.public_subnets +} diff --git a/deploy/aws/data.tf b/deploy/modules/aws/bastion/data.tf similarity index 77% rename from deploy/aws/data.tf rename to deploy/modules/aws/bastion/data.tf index 054c461d3a..c79b6a3b7d 100644 --- a/deploy/aws/data.tf +++ b/deploy/modules/aws/bastion/data.tf @@ -1,6 +1,3 @@ -data "aws_availability_zones" "available" { -} - data "aws_ami" "amazon-linux-2" { most_recent = true diff --git a/deploy/modules/aws/bastion/outputs.tf b/deploy/modules/aws/bastion/outputs.tf new file mode 100644 index 0000000000..3e6cfd3444 --- /dev/null +++ b/deploy/modules/aws/bastion/outputs.tf @@ -0,0 +1,4 @@ +output "bastion_ip" { + description = "Bastion IP address" + value = module.ec2.public_ip +} diff --git a/deploy/modules/aws/bastion/variables.tf b/deploy/modules/aws/bastion/variables.tf new file mode 100644 index 0000000000..82a58614cd --- /dev/null +++ b/deploy/modules/aws/bastion/variables.tf @@ -0,0 +1,38 @@ +variable "bastion_name" { + description = "Name of the EKS cluster. Also used as a prefix in names of related resources." + type = string +} + +variable "enable_ssh_to_workers" { + description = "Whether enable ssh from bastion to workers, if true, the worker_security_group_id must be provided" + default = false +} + +variable "worker_security_group_id" { + description = "The security group that bastion allowed to ssh to" + type = string + default = "" +} + +variable "key_name" { + type = string +} + +variable "vpc_id" { + type = string +} + +variable "public_subnets" { + description = "VPC public subnets, must be set correctly if create_vpc is true" + type = list(string) +} + +variable "bastion_ingress_cidr" { + description = "IP cidr that allowed to access bastion ec2 instance" + default = ["0.0.0.0/0"] # Note: Please restrict your ingress to only necessary IPs. Opening to 0.0.0.0/0 can lead to security vulnerabilities. 
+} + +variable "bastion_instance_type" { + description = "bastion ec2 instance type" + default = "t2.micro" +} diff --git a/deploy/aws/aws-key-pair/main.tf b/deploy/modules/aws/key-pair/main.tf similarity index 100% rename from deploy/aws/aws-key-pair/main.tf rename to deploy/modules/aws/key-pair/main.tf diff --git a/deploy/aws/aws-key-pair/outputs.tf b/deploy/modules/aws/key-pair/outputs.tf similarity index 100% rename from deploy/aws/aws-key-pair/outputs.tf rename to deploy/modules/aws/key-pair/outputs.tf diff --git a/deploy/aws/aws-key-pair/variables.tf b/deploy/modules/aws/key-pair/variables.tf similarity index 100% rename from deploy/aws/aws-key-pair/variables.tf rename to deploy/modules/aws/key-pair/variables.tf diff --git a/deploy/aws/aws-key-pair/versions.tf b/deploy/modules/aws/key-pair/versions.tf similarity index 100% rename from deploy/aws/aws-key-pair/versions.tf rename to deploy/modules/aws/key-pair/versions.tf diff --git a/deploy/aws/tidb-cluster/README.md b/deploy/modules/aws/tidb-cluster/README.md similarity index 100% rename from deploy/aws/tidb-cluster/README.md rename to deploy/modules/aws/tidb-cluster/README.md diff --git a/deploy/modules/aws/tidb-cluster/cluster.tf b/deploy/modules/aws/tidb-cluster/cluster.tf new file mode 100644 index 0000000000..014cb289e2 --- /dev/null +++ b/deploy/modules/aws/tidb-cluster/cluster.tf @@ -0,0 +1,14 @@ +module "tidb-cluster" { + source = "../../share/tidb-cluster-release" + + cluster_name = var.cluster_name + cluster_version = var.cluster_version + pd_count = var.pd_count + tikv_count = var.tikv_count + tidb_count = var.tidb_count + tidb_cluster_chart_version = var.tidb_cluster_chart_version + override_values = var.override_values + local_exec_interpreter = var.local_exec_interpreter + base_values = file("${path.module}/values/default.yaml") + kubeconfig_filename = var.eks.kubeconfig_filename +} diff --git a/deploy/aws/tidb-cluster/data.tf b/deploy/modules/aws/tidb-cluster/data.tf similarity index 80% rename from deploy/aws/tidb-cluster/data.tf rename to deploy/modules/aws/tidb-cluster/data.tf index 0a38de9a30..a44aebbde6 100644 --- a/deploy/aws/tidb-cluster/data.tf +++ b/deploy/modules/aws/tidb-cluster/data.tf @@ -71,15 +71,3 @@ data "template_file" "launch_template_userdata" { ) } } - -data "external" "tidb_elb" { - depends_on = [helm_release.tidb-cluster] - working_dir = path.cwd - program = ["bash", "-c", "kubectl --kubeconfig ${var.eks.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"] -} - -data "external" "monitor_elb" { - depends_on = [helm_release.tidb-cluster] - working_dir = path.cwd - program = ["bash", "-c", "kubectl --kubeconfig ${var.eks.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"] -} diff --git a/deploy/aws/tidb-cluster/local.tf b/deploy/modules/aws/tidb-cluster/local.tf similarity index 100% rename from deploy/aws/tidb-cluster/local.tf rename to deploy/modules/aws/tidb-cluster/local.tf diff --git a/deploy/modules/aws/tidb-cluster/outputs.tf b/deploy/modules/aws/tidb-cluster/outputs.tf new file mode 100644 index 0000000000..4880a4c784 --- /dev/null +++ b/deploy/modules/aws/tidb-cluster/outputs.tf @@ -0,0 +1,7 @@ +output "tidb_hostname" { + value = module.tidb-cluster.tidb_hostname +} + +output "monitor_hostname" { + value = module.tidb-cluster.monitor_hostname +} diff --git a/deploy/aws/tidb-cluster/pre_userdata b/deploy/modules/aws/tidb-cluster/pre_userdata 
similarity index 100% rename from deploy/aws/tidb-cluster/pre_userdata rename to deploy/modules/aws/tidb-cluster/pre_userdata diff --git a/deploy/aws/tidb-cluster/templates/userdata.sh.tpl b/deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl similarity index 100% rename from deploy/aws/tidb-cluster/templates/userdata.sh.tpl rename to deploy/modules/aws/tidb-cluster/templates/userdata.sh.tpl diff --git a/deploy/aws/tidb-cluster/values/default.yaml b/deploy/modules/aws/tidb-cluster/values/default.yaml similarity index 100% rename from deploy/aws/tidb-cluster/values/default.yaml rename to deploy/modules/aws/tidb-cluster/values/default.yaml diff --git a/deploy/aws/tidb-cluster/variables.tf b/deploy/modules/aws/tidb-cluster/variables.tf similarity index 83% rename from deploy/aws/tidb-cluster/variables.tf rename to deploy/modules/aws/tidb-cluster/variables.tf index 08edbe04ec..820491af5b 100644 --- a/deploy/aws/tidb-cluster/variables.tf +++ b/deploy/modules/aws/tidb-cluster/variables.tf @@ -1,3 +1,73 @@ +variable "tidb_cluster_chart_version" { + description = "tidb-cluster chart version" + default = "v1.0.0-beta.3" +} + +variable "create_tidb_cluster_release" { + description = "Whether to create the tidb-cluster helm release in the node pools automatically" + default = true +} + +variable "cluster_name" { + type = string + description = "tidb cluster name" +} + +variable "cluster_version" { + type = string + default = "v3.0.0-rc.2" +} + +variable "ssh_key_name" { + type = string +} + +variable "pd_count" { + type = number + default = 3 +} + +variable "tikv_count" { + type = number + default = 3 +} + +variable "tidb_count" { + type = number + default = 2 +} + +variable "pd_instance_type" { + type = string + default = "m5.xlarge" +} + +variable "tikv_instance_type" { + type = string + default = "c5d.4xlarge" +} + +variable "tidb_instance_type" { + type = string + default = "c5.4xlarge" +} + +variable "monitor_instance_type" { + type = string + default = "c5.2xlarge" +} + +variable "override_values" { + description = "The helm values of the TiDB cluster. It is recommended to use the 'file()' function call to read the content from a local file, e.g. 'file(\"my-cluster.yaml\")'" + type = string + default = "" +} + +variable "eks" { + description = "The EKS cluster information, typically the 'eks' output of the tidb-operator module" +} + +# Advanced customization below variable "subnets" { description = "A list of subnets to place the EKS cluster and workers within." type = list(string) @@ -9,23 +79,6 @@ variable "tags" { default = {} } -variable "worker_groups" { - description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys." - type = list(map(string)) - - default = [ - { - name = "default" - }, - ] -} - -variable "worker_group_count" { - description = "The number of maps contained within the worker_groups list." - type = string - default = "1" -} - variable "workers_group_defaults" { description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys." type = map(string) @@ -94,69 +147,3 @@ variable "iam_path" { description = "If provided, all IAM roles will be created on this path."
default = "/" } - - - - -variable "tidb_cluster_chart_version" { - description = "tidb-cluster chart version" - default = "v1.0.0-rc.1" -} - -variable "cluster_name" { - type = string - description = "tidb cluster name" -} - -variable "cluster_version" { - type = string - default = "v3.0.0-rc.2" -} - -variable "ssh_key_name" { - type = string -} - -variable "pd_count" { - type = number - default = 1 -} - -variable "tikv_count" { - type = number - default = 1 -} - -variable "tidb_count" { - type = number - default = 1 -} - -variable "pd_instance_type" { - type = string - default = "c5d.large" -} - -variable "tikv_instance_type" { - type = string - default = "c5d.large" -} - -variable "tidb_instance_type" { - type = string - default = "c5d.large" -} - -variable "monitor_instance_type" { - type = string - default = "c5d.large" -} - -variable "override_values" { - type = string - default = "" -} - -variable "eks" { - description = "eks info" -} diff --git a/deploy/aws/tidb-cluster/workers.tf b/deploy/modules/aws/tidb-cluster/workers.tf similarity index 100% rename from deploy/aws/tidb-cluster/workers.tf rename to deploy/modules/aws/tidb-cluster/workers.tf diff --git a/deploy/aws/tidb-cluster/workers_launch_template.tf b/deploy/modules/aws/tidb-cluster/workers_launch_template.tf similarity index 100% rename from deploy/aws/tidb-cluster/workers_launch_template.tf rename to deploy/modules/aws/tidb-cluster/workers_launch_template.tf diff --git a/deploy/aws/tidb-operator/README.md b/deploy/modules/aws/tidb-operator/README.md similarity index 75% rename from deploy/aws/tidb-operator/README.md rename to deploy/modules/aws/tidb-operator/README.md index 6b565f945d..f9883397c7 100644 --- a/deploy/aws/tidb-operator/README.md +++ b/deploy/modules/aws/tidb-operator/README.md @@ -1,7 +1,7 @@ The `tidb-operator` module for AWS spins up a control plane for TiDB in Kubernetes. 
The following resources will be provisioned: - An EKS cluster -- A auto scaling group to run the control pods listed below +- An auto scaling group to run the control pods listed below - TiDB operator, including `tidb-controller-manager` and `tidb-scheduler` - local-volume-provisioner -- Tiller for Helm \ No newline at end of file +- Tiller for Helm diff --git a/deploy/aws/tidb-operator/main.tf b/deploy/modules/aws/tidb-operator/main.tf similarity index 91% rename from deploy/aws/tidb-operator/main.tf rename to deploy/modules/aws/tidb-operator/main.tf index b04039f689..26ae6e54b5 100644 --- a/deploy/aws/tidb-operator/main.tf +++ b/deploy/modules/aws/tidb-operator/main.tf @@ -46,13 +46,13 @@ resource "null_resource" "setup-env" { depends_on = [local_file.kubeconfig] provisioner "local-exec" { - working_dir = path.module + working_dir = path.cwd command = < kube_config.yaml kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/${var.operator_version}/manifests/crd.yaml kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/${var.operator_version}/manifests/tiller-rbac.yaml -kubectl apply -f manifests/local-volume-provisioner.yaml -kubectl apply -f manifests/gp2-storageclass.yaml +kubectl apply -f ${path.module}/manifests/local-volume-provisioner.yaml +kubectl apply -f ${path.module}/manifests/gp2-storageclass.yaml helm init --service-account tiller --upgrade --wait until helm ls; do echo "Wait tiller ready" @@ -75,7 +75,7 @@ data "helm_repository" "pingcap" { resource "helm_release" "tidb-operator" { provider = "helm.initial" - depends_on = ["null_resource.setup-env"] + depends_on = [null_resource.setup-env, local_file.kubeconfig] repository = data.helm_repository.pingcap.name chart = "tidb-operator" diff --git a/deploy/aws/tidb-operator/manifests/gp2-storageclass.yaml b/deploy/modules/aws/tidb-operator/manifests/gp2-storageclass.yaml similarity index 100% rename from deploy/aws/tidb-operator/manifests/gp2-storageclass.yaml rename to deploy/modules/aws/tidb-operator/manifests/gp2-storageclass.yaml diff --git a/deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml b/deploy/modules/aws/tidb-operator/manifests/local-volume-provisioner.yaml similarity index 100% rename from deploy/aws/tidb-operator/manifests/local-volume-provisioner.yaml rename to deploy/modules/aws/tidb-operator/manifests/local-volume-provisioner.yaml diff --git a/deploy/aws/tidb-operator/outputs.tf b/deploy/modules/aws/tidb-operator/outputs.tf similarity index 100% rename from deploy/aws/tidb-operator/outputs.tf rename to deploy/modules/aws/tidb-operator/outputs.tf diff --git a/deploy/aws/tidb-operator/variables.tf b/deploy/modules/aws/tidb-operator/variables.tf similarity index 100% rename from deploy/aws/tidb-operator/variables.tf rename to deploy/modules/aws/tidb-operator/variables.tf diff --git a/deploy/modules/aws/vpc/main.tf b/deploy/modules/aws/vpc/main.tf new file mode 100644 index 0000000000..7b1c50ce45 --- /dev/null +++ b/deploy/modules/aws/vpc/main.tf @@ -0,0 +1,27 @@ +data "aws_availability_zones" "available" { +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + + version = "2.6.0" + name = var.vpc_name + cidr = var.vpc_cidr + create_vpc = var.create_vpc + azs = data.aws_availability_zones.available.names + private_subnets = var.private_subnets + public_subnets = var.public_subnets + enable_nat_gateway = true + single_nat_gateway = true + + # The following tags are required for ELB + private_subnet_tags = { + "kubernetes.io/cluster/${var.vpc_name}" = 
"shared" + } + public_subnet_tags = { + "kubernetes.io/cluster/${var.vpc_name}" = "shared" + } + vpc_tags = { + "kubernetes.io/cluster/${var.vpc_name}" = "shared" + } +} diff --git a/deploy/modules/aws/vpc/outputs.tf b/deploy/modules/aws/vpc/outputs.tf new file mode 100644 index 0000000000..a5c5ea2cd3 --- /dev/null +++ b/deploy/modules/aws/vpc/outputs.tf @@ -0,0 +1,11 @@ +output "vpc_id" { + value = var.create_vpc ? module.vpc.vpc_id : var.vpc_id +} + +output "public_subnets" { + value = var.create_vpc ? module.vpc.public_subnets : var.public_subnets +} + +output "private_subnets" { + value = var.create_vpc ? module.vpc.private_subnets : var.private_subnets +} diff --git a/deploy/modules/aws/vpc/variables.tf b/deploy/modules/aws/vpc/variables.tf new file mode 100644 index 0000000000..31c2b8f8e6 --- /dev/null +++ b/deploy/modules/aws/vpc/variables.tf @@ -0,0 +1,31 @@ +variable "vpc_name" { + description = "Name of the VPC" +} + +variable "create_vpc" { + description = "Create a new VPC or not, if true the vpc_id/subnet_ids must be set correctly, otherwise the vpc_cidr/private_subnets/public_subnets must be set correctly" + default = true +} + +variable "vpc_cidr" { + description = "VPC cidr, must be set correctly if create_vpc is true" + default = "10.0.0.0/16" +} + +variable "private_subnets" { + description = "VPC private subnets, must be set correctly if create_vpc is true" + type = list(string) + default = ["10.0.16.0/20", "10.0.32.0/20", "10.0.48.0/20"] +} + +variable "public_subnets" { + description = "VPC public subnets, must be set correctly if create_vpc is true" + type = list(string) + default = ["10.0.64.0/20", "10.0.80.0/20", "10.0.96.0/20"] +} + +variable "vpc_id" { + description = "VPC id, must be set correctly if create_vpc is false" + type = string + default = "" +} diff --git a/deploy/modules/share/tidb-cluster-release/data.tf b/deploy/modules/share/tidb-cluster-release/data.tf new file mode 100644 index 0000000000..e0a6d007b0 --- /dev/null +++ b/deploy/modules/share/tidb-cluster-release/data.tf @@ -0,0 +1,11 @@ +data "external" "tidb_hostname" { + depends_on = [helm_release.tidb-cluster] + working_dir = path.cwd + program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"] +} + +data "external" "monitor_hostname" { + depends_on = [helm_release.tidb-cluster] + working_dir = path.cwd + program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_filename} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"] +} diff --git a/deploy/aws/tidb-cluster/cluster.tf b/deploy/modules/share/tidb-cluster-release/main.tf similarity index 93% rename from deploy/aws/tidb-cluster/cluster.tf rename to deploy/modules/share/tidb-cluster-release/main.tf index f97312d3d6..4f89e914c8 100644 --- a/deploy/aws/tidb-cluster/cluster.tf +++ b/deploy/modules/share/tidb-cluster-release/main.tf @@ -1,5 +1,5 @@ resource "null_resource" "wait-tiller-ready" { - depends_on = [var.eks] + depends_on = [var.kubeconfig_filename] provisioner "local-exec" { working_dir = path.cwd @@ -10,7 +10,7 @@ until helm ls; do done EOS environment = { - KUBECONFIG = var.eks.kubeconfig_filename + KUBECONFIG = var.kubeconfig_filename } } } @@ -32,7 +32,7 @@ resource "helm_release" "tidb-cluster" { wait = false values = [ - file("${path.module}/values/default.yaml"), + var.base_values, var.override_values ] @@ -133,8 +133,9 @@ until kubectl get po -n 
${var.cluster_name} -lapp.kubernetes.io/component=tidb | sleep 5 done EOS + interpreter = var.local_exec_interpreter environment = { - KUBECONFIG = var.eks.kubeconfig_filename + KUBECONFIG = var.kubeconfig_filename } } } diff --git a/deploy/modules/share/tidb-cluster-release/outputs.tf b/deploy/modules/share/tidb-cluster-release/outputs.tf new file mode 100644 index 0000000000..a2daa356d3 --- /dev/null +++ b/deploy/modules/share/tidb-cluster-release/outputs.tf @@ -0,0 +1,7 @@ +output "tidb_hostname" { + value = lookup(data.external.tidb_hostname.result, "hostname", "empty") +} + +output "monitor_hostname" { + value = lookup(data.external.monitor_hostname.result, "hostname", "empty") +} diff --git a/deploy/modules/share/tidb-cluster-release/variables.tf b/deploy/modules/share/tidb-cluster-release/variables.tf new file mode 100644 index 0000000000..d3820054fc --- /dev/null +++ b/deploy/modules/share/tidb-cluster-release/variables.tf @@ -0,0 +1,50 @@ +variable "kubeconfig_filename" { + description = "The kubeconfig filename; the path should be relative to the current working dir" + default = "" +} + +variable "tidb_cluster_chart_version" { + description = "tidb-cluster chart version" + default = "v1.0.0-beta.3" +} + +variable "cluster_name" { + type = string + description = "tidb cluster name" +} + +variable "cluster_version" { + type = string + default = "v3.0.0" +} + +variable "pd_count" { + type = number + default = 3 +} + +variable "tikv_count" { + type = number + default = 3 +} + +variable "tidb_count" { + type = number + default = 2 +} + +variable "base_values" { + type = string + default = "" +} + +variable "override_values" { + type = string + default = "" +} + +variable "local_exec_interpreter" { + description = "Command to run for local-exec resources. Must be a shell-style interpreter. If you are on Windows, Git Bash is a good choice." + type = list(string) + default = ["/bin/sh", "-c"] +}
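
For reference, below is a minimal sketch of how the `deploy/aws-staging` example from the README above could be deployed and consumed with the standard terraform CLI. The `.pem` file name is an assumption derived from the key-pair module's `name` and `path` parameters, and `ec2-user` is the default login for the Amazon Linux 2 bastion AMI; adjust both to your actual setup:

```shell
# assume we are in the project root
$ cd deploy/aws-staging
$ terraform init
$ terraform apply

# read the declared outputs back at any time
$ terraform output bastion_ip
$ terraform output cluster-a_tidb-dns

# ssh to the bastion (the key file name is assumed from the key-pair module's
# name/path parameters; ec2-user is the Amazon Linux 2 default user)
$ ssh -i credentials/another-eks-cluster.pem ec2-user@$(terraform output bastion_ip)

# from the bastion, connect to the TiDB endpoint printed by cluster-a_tidb-dns
# (TiDB listens on port 4000)
$ mysql -h <cluster-a_tidb-dns> -P 4000 -u root
```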