diff --git a/README.md b/README.md index 0d36bdcb2c..ec43529cd8 100644 --- a/README.md +++ b/README.md @@ -114,6 +114,8 @@ Then perform the following commands on the root folder: | enable\_resource\_consumption\_export | Whether to enable resource consumption metering on this cluster. When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. | bool | `"true"` | no | | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/autogen/main/cluster.tf.tmpl b/autogen/main/cluster.tf.tmpl index e72781ecc3..a4266ec9a1 100644 --- a/autogen/main/cluster.tf.tmpl +++ b/autogen/main/cluster.tf.tmpl @@ -529,25 +529,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/autogen/main/dns.tf.tmpl b/autogen/main/dns.tf.tmpl index 20c3b25ee9..13413437df 100644 --- a/autogen/main/dns.tf.tmpl +++ b/autogen/main/dns.tf.tmpl @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ?
1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/autogen/main/scripts/delete-default-resource.sh b/autogen/main/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/autogen/main/scripts/delete-default-resource.sh +++ b/autogen/main/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/autogen/main/scripts/wait-for-cluster.sh b/autogen/main/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/autogen/main/scripts/wait-for-cluster.sh +++ b/autogen/main/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." 
- sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" diff --git a/autogen/main/variables.tf.tmpl b/autogen/main/variables.tf.tmpl index 30dc00759d..a924a4d8cc 100644 --- a/autogen/main/variables.tf.tmpl +++ b/autogen/main/variables.tf.tmpl @@ -531,3 +531,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/cluster.tf b/cluster.tf index d1d8b7a2f5..504c140249 100644 --- a/cluster.tf +++ b/cluster.tf @@ -249,25 +249,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/dns.tf b/dns.tf index 82be254107..7d4e9b93de 100644 --- a/dns.tf +++ b/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 098a36e0e7..cbced30d9b 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -173,6 +173,8 @@ Then perform the following commands on the root folder: | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | | gce\_pd\_csi\_driver | (Beta) Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. | bool | `"false"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index c2d342d5dc..7c4e82681a 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -481,25 +481,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 
0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/modules/beta-private-cluster-update-variant/dns.tf b/modules/beta-private-cluster-update-variant/dns.tf index 82be254107..7d4e9b93de 100644 --- a/modules/beta-private-cluster-update-variant/dns.tf +++ b/modules/beta-private-cluster-update-variant/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/beta-private-cluster-update-variant/scripts/delete-default-resource.sh b/modules/beta-private-cluster-update-variant/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/modules/beta-private-cluster-update-variant/scripts/delete-default-resource.sh +++ b/modules/beta-private-cluster-update-variant/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" 
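The conversion above repeats one pattern per cluster variant: the script that previously ran in `local-exec` provisioners of a `null_resource` is handed to the shared gcloud module as a create/destroy command pair. A minimal standalone sketch of that pattern follows; only the attribute names are taken from the diff, while the project name, cluster name, and the literal `module_depends_on` value are placeholders.

```hcl
# Illustrative only: wraps wait-for-cluster.sh in the gcloud module the same way
# the diff does inside cluster.tf. Values are placeholders, not module defaults.
module "wait_for_example_cluster" {
  source  = "terraform-google-modules/gcloud/google"
  version = "~> 1.0.1"

  enabled       = true  # the cluster module gates this on var.skip_provisioners
  skip_download = true  # assume gcloud is already on PATH
  upgrade       = false # leave the installed gcloud untouched

  # Runs on apply, replacing the create-time local-exec provisioner ...
  create_cmd_entrypoint = "./scripts/wait-for-cluster.sh"
  create_cmd_body       = "example-project example-cluster"

  # ... and on destroy, replacing the `when = destroy` provisioner.
  destroy_cmd_entrypoint = "./scripts/wait-for-cluster.sh"
  destroy_cmd_body       = "example-project example-cluster"

  # In the diff this receives the cluster master_version and node pool names,
  # so the command only runs once those resources exist.
  module_depends_on = ["example-master-version"]
}
```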
diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index a1c5d74782..3fb37a3bd7 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -523,3 +523,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index e323af5ed4..9e0ecfae22 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -151,6 +151,8 @@ Then perform the following commands on the root folder: | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | | gce\_pd\_csi\_driver | (Beta) Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. | bool | `"false"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index da0c16be96..385e1cfeb0 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -408,25 +408,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 
0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index 82be254107..7d4e9b93de 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/beta-private-cluster/scripts/delete-default-resource.sh b/modules/beta-private-cluster/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/modules/beta-private-cluster/scripts/delete-default-resource.sh +++ b/modules/beta-private-cluster/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/modules/beta-private-cluster/scripts/wait-for-cluster.sh b/modules/beta-private-cluster/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/modules/beta-private-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-private-cluster/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" 
diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index a1c5d74782..3fb37a3bd7 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -523,3 +523,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/modules/beta-public-cluster-update-variant/README.md b/modules/beta-public-cluster-update-variant/README.md index 6475d8c665..7c1ef28818 100644 --- a/modules/beta-public-cluster-update-variant/README.md +++ b/modules/beta-public-cluster-update-variant/README.md @@ -164,6 +164,8 @@ Then perform the following commands on the root folder: | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | | gce\_pd\_csi\_driver | (Beta) Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. | bool | `"false"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/modules/beta-public-cluster-update-variant/cluster.tf b/modules/beta-public-cluster-update-variant/cluster.tf index 2d60714125..d7485acf99 100644 --- a/modules/beta-public-cluster-update-variant/cluster.tf +++ b/modules/beta-public-cluster-update-variant/cluster.tf @@ -468,25 +468,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 
0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/modules/beta-public-cluster-update-variant/dns.tf b/modules/beta-public-cluster-update-variant/dns.tf index 82be254107..7d4e9b93de 100644 --- a/modules/beta-public-cluster-update-variant/dns.tf +++ b/modules/beta-public-cluster-update-variant/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/beta-public-cluster-update-variant/scripts/delete-default-resource.sh b/modules/beta-public-cluster-update-variant/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/modules/beta-public-cluster-update-variant/scripts/delete-default-resource.sh +++ b/modules/beta-public-cluster-update-variant/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/modules/beta-public-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/beta-public-cluster-update-variant/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/modules/beta-public-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/beta-public-cluster-update-variant/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" 
diff --git a/modules/beta-public-cluster-update-variant/variables.tf b/modules/beta-public-cluster-update-variant/variables.tf index 4595792484..25d0248139 100644 --- a/modules/beta-public-cluster-update-variant/variables.tf +++ b/modules/beta-public-cluster-update-variant/variables.tf @@ -499,3 +499,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 4fe4be5e34..90487e7d80 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -142,6 +142,8 @@ Then perform the following commands on the root folder: | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | | gce\_pd\_csi\_driver | (Beta) Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. | bool | `"false"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf index e68d354141..1019c1957a 100644 --- a/modules/beta-public-cluster/cluster.tf +++ b/modules/beta-public-cluster/cluster.tf @@ -395,25 +395,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 
0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index 82be254107..7d4e9b93de 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/beta-public-cluster/scripts/delete-default-resource.sh b/modules/beta-public-cluster/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/modules/beta-public-cluster/scripts/delete-default-resource.sh +++ b/modules/beta-public-cluster/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/modules/beta-public-cluster/scripts/wait-for-cluster.sh b/modules/beta-public-cluster/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/modules/beta-public-cluster/scripts/wait-for-cluster.sh +++ b/modules/beta-public-cluster/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" 
diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 4595792484..25d0248139 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -499,3 +499,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md index 49e4b363bc..51b9488e0b 100644 --- a/modules/private-cluster-update-variant/README.md +++ b/modules/private-cluster-update-variant/README.md @@ -145,6 +145,8 @@ Then perform the following commands on the root folder: | enable\_resource\_consumption\_export | Whether to enable resource consumption metering on this cluster. When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. | bool | `"true"` | no | | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf index a46ce1d389..acd5185c14 100644 --- a/modules/private-cluster-update-variant/cluster.tf +++ b/modules/private-cluster-update-variant/cluster.tf @@ -335,25 +335,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 
0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/modules/private-cluster-update-variant/dns.tf b/modules/private-cluster-update-variant/dns.tf index 82be254107..7d4e9b93de 100644 --- a/modules/private-cluster-update-variant/dns.tf +++ b/modules/private-cluster-update-variant/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/private-cluster-update-variant/scripts/delete-default-resource.sh b/modules/private-cluster-update-variant/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/modules/private-cluster-update-variant/scripts/delete-default-resource.sh +++ b/modules/private-cluster-update-variant/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh +++ b/modules/private-cluster-update-variant/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" 
diff --git a/modules/private-cluster-update-variant/variables.tf b/modules/private-cluster-update-variant/variables.tf index 0d76d967b2..896196f295 100644 --- a/modules/private-cluster-update-variant/variables.tf +++ b/modules/private-cluster-update-variant/variables.tf @@ -362,3 +362,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index 920893ae53..7a76ab1e3d 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -123,6 +123,8 @@ Then perform the following commands on the root folder: | enable\_resource\_consumption\_export | Whether to enable resource consumption metering on this cluster. When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. | bool | `"true"` | no | | firewall\_inbound\_ports | List of TCP ports for admission/webhook controllers | list(string) | `` | no | | firewall\_priority | Priority rule for firewall rules | number | `"1000"` | no | +| gcloud\_skip\_download | Whether to skip downloading gcloud (assumes gcloud is already available outside the module) | bool | `"true"` | no | +| gcloud\_upgrade | Whether to upgrade gcloud at runtime | bool | `"false"` | no | | grant\_registry\_access | Grants created cluster-specific service account storage.objectViewer role. | bool | `"false"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | | http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf index c73ed5fb76..3006a270ae 100644 --- a/modules/private-cluster/cluster.tf +++ b/modules/private-cluster/cluster.tf @@ -262,25 +262,21 @@ resource "google_container_node_pool" "pools" { } } -resource "null_resource" "wait_for_cluster" { - count = var.skip_provisioners ? 
0 : 1 - - triggers = { - project_id = var.project_id - name = var.name - } - - provisioner "local-exec" { - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - provisioner "local-exec" { - when = destroy - command = "${path.module}/scripts/wait-for-cluster.sh ${self.triggers.project_id} ${self.triggers.name}" - } - - depends_on = [ - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_wait_for_cluster" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = ! var.skip_provisioners + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + create_cmd_body = "${var.project_id} ${var.name}" + destroy_cmd_entrypoint = "${path.module}/scripts/wait-for-cluster.sh" + destroy_cmd_body = "${var.project_id} ${var.name}" + + module_depends_on = concat( + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index 82be254107..7d4e9b93de 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -19,18 +19,23 @@ /****************************************** Delete default kube-dns configmap *****************************************/ -resource "null_resource" "delete_default_kube_dns_configmap" { - count = (local.custom_kube_dns_config || local.upstream_nameservers_config) && ! var.skip_provisioners ? 1 : 0 - - provisioner "local-exec" { - command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" - } - - depends_on = [ - data.google_client_config.default, - google_container_cluster.primary, - google_container_node_pool.pools, - ] +module "gcloud_delete_default_kube_dns_configmap" { + source = "terraform-google-modules/gcloud/google" + version = "~> 1.0.1" + enabled = (local.custom_kube_dns_config || local.upstream_nameservers_config) && !
var.skip_provisioners + additional_components = ["kubectl"] + + upgrade = var.gcloud_upgrade + skip_download = var.gcloud_skip_download + + create_cmd_entrypoint = "${path.module}/scripts/kubectl_wrapper.sh" + create_cmd_body = "https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" + + module_depends_on = concat( + [data.google_client_config.default.access_token], + [google_container_cluster.primary.master_version], + [for pool in google_container_node_pool.pools : pool.name] + ) } /****************************************** @@ -55,7 +60,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -82,7 +87,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, @@ -112,7 +117,7 @@ EOF } depends_on = [ - null_resource.delete_default_kube_dns_configmap, + module.gcloud_delete_default_kube_dns_configmap.wait, data.google_client_config.default, google_container_cluster.primary, google_container_node_pool.pools, diff --git a/modules/private-cluster/scripts/delete-default-resource.sh b/modules/private-cluster/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/modules/private-cluster/scripts/delete-default-resource.sh +++ b/modules/private-cluster/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/modules/private-cluster/scripts/wait-for-cluster.sh b/modules/private-cluster/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/modules/private-cluster/scripts/wait-for-cluster.sh +++ b/modules/private-cluster/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" 
diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 0d76d967b2..896196f295 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -362,3 +362,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +} diff --git a/scripts/delete-default-resource.sh b/scripts/delete-default-resource.sh index 32b2f51a41..d31ee4be7d 100755 --- a/scripts/delete-default-resource.sh +++ b/scripts/delete-default-resource.sh @@ -29,7 +29,7 @@ RESOURCE_LIST=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" || exi # Delete requested resource if [[ $RESOURCE_LIST = *"${RESOURCE_NAME}"* ]]; then - RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" -o json "${RESOURCE_NAME}" | jq -r '.metadata.labels."maintained_by"') + RESOURCE_MAINTAINED_LABEL=$(kubectl -n "${RESOURCE_NAMESPACE}" get "${RESOURCE_TYPE}" "${RESOURCE_NAME}" -o=jsonpath='{.metadata.labels.maintained_by}') if [[ $RESOURCE_MAINTAINED_LABEL = "terraform" ]]; then echo "Terraform maintained ${RESOURCE_NAME} ${RESOURCE_TYPE} appears to have already been created in ${RESOURCE_NAMESPACE} namespace" else diff --git a/scripts/wait-for-cluster.sh b/scripts/wait-for-cluster.sh index 6b715f70e5..9636eeec1b 100755 --- a/scripts/wait-for-cluster.sh +++ b/scripts/wait-for-cluster.sh @@ -25,12 +25,9 @@ CLUSTER_NAME=$2 echo "Waiting for cluster $CLUSTER_NAME in project $PROJECT to reconcile..." -current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") - -while [[ "$current_status" == "RECONCILING" ]]; do - printf "." - sleep 5 - current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") -done +while + current_status=$(gcloud container clusters list --project="$PROJECT" --filter=name:"$CLUSTER_NAME" --format="value(status)") + [[ "${current_status}" == "RECONCILING" ]] +do printf ".";sleep 5; done echo "Cluster is ready!" diff --git a/variables.tf b/variables.tf index 41a569ce89..d9dab264d5 100644 --- a/variables.tf +++ b/variables.tf @@ -338,3 +338,15 @@ variable "firewall_inbound_ports" { description = "List of TCP ports for admission/webhook controllers" default = ["8443", "9443", "15017"] } + +variable "gcloud_upgrade" { + type = bool + description = "Whether to upgrade gcloud at runtime" + default = false +} + +variable "gcloud_skip_download" { + type = bool + description = "Whether to skip downloading gcloud (assumes gcloud is already available outside the module)" + default = true +}
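Taken together, the change surfaces two new inputs, `gcloud_skip_download` and `gcloud_upgrade`, on the root module and every variant. A hedged consumer-side sketch of setting them when instantiating the module follows; project, network, and secondary-range names are placeholders, and the remaining inputs follow the READMEs above.

```hcl
module "gke" {
  source = "terraform-google-modules/kubernetes-engine/google"
  # version pinned by the consumer

  project_id        = "example-project"    # placeholder
  name              = "example-cluster"    # placeholder
  region            = "us-central1"
  network           = "example-network"    # placeholder
  subnetwork        = "example-subnetwork" # placeholder
  ip_range_pods     = "example-pods"       # placeholder secondary range name
  ip_range_services = "example-services"   # placeholder secondary range name

  # Provisioner behaviour affected by this change:
  skip_provisioners    = false # keep the wait-for-cluster and kube-dns provisioners
  gcloud_skip_download = true  # use the gcloud already on PATH instead of downloading one
  gcloud_upgrade       = false # do not upgrade that gcloud at runtime
}
```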