From 74cb4c63e75328fedc1b92fd6c520ccb4b80ff1d Mon Sep 17 00:00:00 2001
From: tennix
Date: Tue, 30 Apr 2019 02:38:20 +0800
Subject: [PATCH] fix scaling out and helm failure

---
 deploy/aws/README.md  |  5 +----
 deploy/aws/data.tf    | 49 +++++++++++++++++++++++++++++++-----------
 deploy/aws/main.tf    | 48 ++++++++++++++++++++++++++++++--------
 deploy/aws/outputs.tf | 14 +++++++++++--
 4 files changed, 89 insertions(+), 27 deletions(-)

diff --git a/deploy/aws/README.md b/deploy/aws/README.md
index 1a98d607b44..5ed3e0f997d 100644
--- a/deploy/aws/README.md
+++ b/deploy/aws/README.md
@@ -4,6 +4,7 @@
 * [awscli](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) >= 1.16.73
 * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) >= 1.11
 * [helm](https://github.com/helm/helm/blob/master/docs/install.md#installing-the-helm-client) >= 2.9.0
+* [jq](https://stedolan.github.io/jq/download/)
 * [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator#4-set-up-kubectl-to-use-authentication-tokens-provided-by-aws-iam-authenticator-for-kubernetes)
 
 ## Configure awscli
@@ -75,7 +76,3 @@ Currently, the instance type of TiDB cluster component is not configurable becau
 
 - [ ] auto-scaling group policy
 - [ ] Allow create a minimal TiDB cluster
-
-## Known issues
-
-There is possibility the helm install release fails the first time, but running `terraform apply` again will install tidb-operator and tidb-cluster release successfully.
diff --git a/deploy/aws/data.tf b/deploy/aws/data.tf
index 430bc3cc737..bcfebfb78c1 100644
--- a/deploy/aws/data.tf
+++ b/deploy/aws/data.tf
@@ -1,5 +1,16 @@
 data "aws_availability_zones" "available" {}
 
+data "aws_ami" "amazon-linux-2" {
+  most_recent = true
+
+  owners = ["amazon"]
+
+  filter {
+    name   = "name"
+    values = ["amzn2-ami-hvm-*-x86_64-gp2"]
+  }
+}
+
 data "template_file" "tidb_cluster_values" {
   template = "${file("${path.module}/templates/tidb-cluster-values.yaml.tpl")}"
   vars {
@@ -10,18 +21,32 @@ data "template_file" "tidb_cluster_values" {
   }
 }
 
-data "kubernetes_service" "tidb" {
-  depends_on = ["helm_release.tidb-cluster"]
-  metadata {
-    name      = "tidb-cluster-tidb"
-    namespace = "tidb"
-  }
+# The kubernetes provider can't use a computed config_path right now, see issue:
+# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/142
+# so we don't use the kubernetes provider to retrieve the TiDB and monitor connection info;
+# instead we use external data sources.
+# data "kubernetes_service" "tidb" {
+#   depends_on = ["helm_release.tidb-cluster"]
+#   metadata {
+#     name      = "tidb-cluster-tidb"
+#     namespace = "tidb"
+#   }
+# }
+
+# data "kubernetes_service" "monitor" {
+#   depends_on = ["helm_release.tidb-cluster"]
+#   metadata {
+#     name      = "tidb-cluster-grafana"
+#     namespace = "tidb"
+#   }
+# }
+
+data "external" "tidb_service" {
+  depends_on = ["null_resource.wait-tidb-ready"]
+  program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-tidb -ojson | jq '.status.loadBalancer.ingress[0]'"]
 }
 
-data "kubernetes_service" "monitor" {
-  depends_on = ["helm_release.tidb-cluster"]
-  metadata {
-    name      = "tidb-cluster-grafana"
-    namespace = "tidb"
-  }
+data "external" "monitor_service" {
+  depends_on = ["null_resource.wait-tidb-ready"]
+  program = ["bash", "-c", "kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-grafana -ojson | jq '.status.loadBalancer.ingress[0]'"]
 }
diff --git a/deploy/aws/main.tf b/deploy/aws/main.tf
index 87cb0f26e7d..cb48cf0d6a1 100644
--- a/deploy/aws/main.tf
+++ b/deploy/aws/main.tf
@@ -62,7 +62,7 @@ module "ec2" {
   version = "1.21.0"
   name = "${var.cluster_name}-bastion"
   instance_count = "${var.create_bastion ? 1:0}"
-  ami = "${var.bastion_ami}"
+  ami = "${data.aws_ami.amazon-linux-2.id}"
   instance_type = "${var.bastion_instance_type}"
   key_name = "${module.key-pair.key_name}"
   associate_public_ip_address = true
@@ -77,13 +77,15 @@
 }
 
 module "eks" {
-  source = "terraform-aws-modules/eks/aws"
-  version = "2.3.1"
+  # source = "terraform-aws-modules/eks/aws"
+  # version = "2.3.1"
+  # We cannot use the cluster autoscaler for pods with local PVs due to the limitations listed here:
+  # https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#i-have-a-couple-of-pending-pods-but-there-was-no-scale-up
+  # so we scale out by updating the auto-scaling group desired_capacity directly via a patched version of the AWS EKS module.
+  source = "github.com/tennix/terraform-aws-eks?ref=v2.3.1-patch"
   cluster_name = "${var.cluster_name}"
   cluster_version = "${var.k8s_version}"
-  # The output config can not be used by kubernetes and helm provider directly
-  # so using local_file resource to force kubernetes and helm provider to rely on
-  # config_output_path = "credentials/"
+  config_output_path = "credentials/"
 
   subnets = "${split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets))}"
   vpc_id = "${var.create_vpc ? module.vpc.vpc_id : var.vpc_id}"
@@ -159,9 +161,13 @@ resource "local_file" "kubeconfig" {
   filename = "${path.module}/credentials/kubeconfig_${var.cluster_name}"
 }
 
-provider "kubernetes" {
-  config_path = "${local_file.kubeconfig.filename}"
-}
+# The kubernetes provider can't use a computed config_path right now, see issue:
+# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/142
+# so we don't use the kubernetes provider to retrieve the TiDB and monitor connection info;
+# instead we use external data sources.
+# provider "kubernetes" {
+#   config_path = "${local_file.kubeconfig.filename}"
+# }
 
 provider "helm" {
   insecure = true
@@ -210,3 +216,27 @@ resource "helm_release" "tidb-cluster" {
     "${data.template_file.tidb_cluster_values.rendered}"
   ]
 }
+
+resource "null_resource" "wait-tidb-ready" {
+  depends_on = ["helm_release.tidb-cluster"]
+
+  provisioner "local-exec" {
+    command = <
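
The new `external` data sources rely on the external provider's protocol: the program must print a single flat JSON object of string values, which Terraform then exposes as the data source's `result` map. On AWS, the jq filter `.status.loadBalancer.ingress[0]` typically prints an object like `{"hostname": "internal-xxxxxxxx.us-west-2.elb.amazonaws.com"}`. The outputs.tf hunk is not shown above, so the following is only a sketch of how those results could be consumed; the output names and the `hostname` key are assumptions, not taken from the patch:

    # Sketch only: output names and the "hostname" key are assumptions, not part of this patch.
    output "tidb_endpoint" {
      description = "Load balancer address of the TiDB service (assumed hostname key)"
      value       = "${lookup(data.external.tidb_service.result, "hostname", "")}"
    }

    output "monitor_endpoint" {
      description = "Load balancer address of the Grafana service (assumed hostname key)"
      value       = "${lookup(data.external.monitor_service.result, "hostname", "")}"
    }

Using `lookup` with an empty default keeps `terraform apply` from failing if the load balancer address has not been published yet.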
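
The body of the `wait-tidb-ready` provisioner's command is not shown above. Its role in the dependency graph is to block the external data sources until the TiDB service exists and has a load balancer. A minimal sketch of such a readiness gate, assuming a kubectl polling loop (the heredoc delimiter, poll interval, and exact kubectl query are assumptions, not the patch's actual script):

    resource "null_resource" "wait-tidb-ready" {
      depends_on = ["helm_release.tidb-cluster"]

      provisioner "local-exec" {
        # Illustrative only: poll until the TiDB service gets a load balancer address.
        command = <<EOS
    until kubectl --kubeconfig credentials/kubeconfig_${var.cluster_name} get svc -n tidb tidb-cluster-tidb \
        -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' 2>/dev/null | grep -q .; do
      echo "Waiting for the TiDB load balancer to be provisioned..."
      sleep 10
    done
    EOS
      }
    }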
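
Scaling out therefore means raising the desired capacity of the relevant auto-scaling group and re-running `terraform apply`, since the cluster autoscaler cannot scale up for pending pods that request local PVs. Assuming the patched module keeps the upstream terraform-aws-modules/eks `worker_groups` interface and that the repository defines `tikv_count` and `tikv_instance_type` variables (assumptions; this part of main.tf is not shown in the patch), a TiKV worker group would be scaled roughly like this:

    # Sketch only: the worker group layout and variable names are assumptions.
    module "eks" {
      source = "github.com/tennix/terraform-aws-eks?ref=v2.3.1-patch"

      cluster_name       = "${var.cluster_name}"
      cluster_version    = "${var.k8s_version}"
      config_output_path = "credentials/"
      subnets            = "${split(",", var.create_vpc ? join(",", module.vpc.private_subnets) : join(",", var.subnets))}"
      vpc_id             = "${var.create_vpc ? module.vpc.vpc_id : var.vpc_id}"

      worker_groups = [
        {
          name                 = "tikv"
          instance_type        = "${var.tikv_instance_type}"
          asg_min_size         = "${var.tikv_count}"
          asg_desired_capacity = "${var.tikv_count}"  # raise tikv_count and re-apply to scale out
          asg_max_size         = "${var.tikv_count + 2}"
        },
      ]
    }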