From c2a1401bcb01a3c08a18a685f5bcfb80cdac059a Mon Sep 17 00:00:00 2001 From: Bohdan Yurov Date: Thu, 13 Jun 2019 20:14:06 +0300 Subject: [PATCH 01/16] Fixes #158: Add support for Terraform v0.12 https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/158 https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/75 Add support for TF 0.12. - autogen - root and private_cluster modules - tests (including support for validation) - examples --- CHANGELOG.md | 11 + Gemfile | 2 +- Makefile | 2 +- README.md | 78 +---- auth.tf | 6 +- autogen/README.md | 6 +- autogen/auth.tf | 10 +- autogen/cluster_regional.tf | 281 +++++++++++----- autogen/cluster_zonal.tf | 270 +++++++++++----- autogen/dns.tf | 58 +++- autogen/main.tf | 299 +++++++++++------ autogen/masq.tf | 14 +- autogen/networks.tf | 24 +- autogen/outputs.tf | 44 +-- autogen/sa.tf | 37 ++- autogen/variables.tf | 65 +++- autogen/versions.tf | 19 ++ cluster_regional.tf | 213 ++++++++---- cluster_zonal.tf | 209 ++++++++---- dns.tf | 58 +++- examples/deploy_service/main.tf | 35 +- examples/deploy_service/outputs.tf | 9 +- examples/disable_client_cert/main.tf | 24 +- examples/disable_client_cert/outputs.tf | 9 +- examples/disable_client_cert/variables.tf | 1 + examples/node_pool/main.tf | 53 ++- examples/node_pool/outputs.tf | 9 +- examples/node_pool/variables.tf | 3 +- examples/shared_vpc/main.tf | 23 +- examples/shared_vpc/outputs.tf | 9 +- examples/shared_vpc/variables.tf | 1 + examples/simple_regional/main.tf | 21 +- examples/simple_regional/outputs.tf | 9 +- examples/simple_regional/variables.tf | 1 + examples/simple_regional_beta/main.tf | 34 +- examples/simple_regional_beta/outputs.tf | 8 +- examples/simple_regional_beta/test_outputs.tf | 22 +- examples/simple_regional_beta/versions.tf | 19 ++ examples/simple_regional_private/main.tf | 41 +-- examples/simple_regional_private/outputs.tf | 9 +- examples/simple_regional_private/variables.tf | 1 + examples/simple_regional_private_beta/main.tf | 50 +-- .../simple_regional_private_beta/outputs.tf | 9 +- .../test_outputs.tf | 23 +- .../simple_regional_private_beta/variables.tf | 1 + .../simple_regional_private_beta/versions.tf | 19 ++ examples/simple_zonal/main.tf | 21 +- examples/simple_zonal/outputs.tf | 9 +- examples/simple_zonal/variables.tf | 3 +- examples/simple_zonal_private/main.tf | 43 +-- examples/simple_zonal_private/outputs.tf | 9 +- examples/simple_zonal_private/variables.tf | 3 +- examples/stub_domains/main.tf | 24 +- examples/stub_domains/outputs.tf | 9 +- examples/stub_domains/variables.tf | 1 + examples/stub_domains_private/main.tf | 52 +-- examples/stub_domains_private/outputs.tf | 9 +- examples/stub_domains_private/test_outputs.tf | 21 +- examples/stub_domains_private/variables.tf | 1 + .../stub_domains_upstream_nameservers/main.tf | 24 +- .../outputs.tf | 9 +- .../test_outputs.tf | 21 +- .../variables.tf | 1 + .../versions.tf | 19 ++ examples/upstream_nameservers/main.tf | 23 +- examples/upstream_nameservers/outputs.tf | 9 +- examples/upstream_nameservers/test_outputs.tf | 21 +- examples/upstream_nameservers/variables.tf | 1 + examples/upstream_nameservers/versions.tf | 19 ++ examples/workload_metadata_config/main.tf | 43 +-- examples/workload_metadata_config/outputs.tf | 9 +- .../workload_metadata_config/variables.tf | 3 +- examples/workload_metadata_config/versions.tf | 19 ++ main.tf | 264 ++++++++++----- masq.tf | 14 +- modules/beta-private-cluster/README.md | 91 +----- modules/beta-private-cluster/auth.tf | 6 +- 
.../beta-private-cluster/cluster_regional.tf | 263 ++++++++++----- modules/beta-private-cluster/cluster_zonal.tf | 256 ++++++++++----- modules/beta-private-cluster/dns.tf | 58 +++- modules/beta-private-cluster/main.tf | 303 +++++++++++------- modules/beta-private-cluster/masq.tf | 14 +- modules/beta-private-cluster/networks.tf | 16 +- modules/beta-private-cluster/outputs.tf | 44 +-- modules/beta-private-cluster/sa.tf | 37 ++- modules/beta-private-cluster/variables.tf | 121 ++++--- modules/beta-private-cluster/versions.tf | 19 ++ modules/beta-public-cluster/README.md | 87 +---- modules/beta-public-cluster/auth.tf | 6 +- .../beta-public-cluster/cluster_regional.tf | 258 +++++++++++---- modules/beta-public-cluster/cluster_zonal.tf | 251 +++++++++++---- modules/beta-public-cluster/dns.tf | 58 +++- modules/beta-public-cluster/main.tf | 298 ++++++++++------- modules/beta-public-cluster/masq.tf | 14 +- modules/beta-public-cluster/networks.tf | 16 +- modules/beta-public-cluster/outputs.tf | 44 +-- modules/beta-public-cluster/sa.tf | 37 ++- modules/beta-public-cluster/variables.tf | 109 ++++--- modules/beta-public-cluster/versions.tf | 19 ++ modules/private-cluster/README.md | 82 +---- modules/private-cluster/auth.tf | 6 +- modules/private-cluster/cluster_regional.tf | 218 +++++++++---- modules/private-cluster/cluster_zonal.tf | 214 +++++++++---- modules/private-cluster/dns.tf | 58 +++- modules/private-cluster/main.tf | 273 ++++++++++------ modules/private-cluster/masq.tf | 14 +- modules/private-cluster/networks.tf | 16 +- modules/private-cluster/outputs.tf | 38 +-- modules/private-cluster/sa.tf | 37 ++- modules/private-cluster/variables.tf | 119 ++++--- modules/private-cluster/versions.tf | 19 ++ networks.tf | 16 +- outputs.tf | 38 +-- sa.tf | 37 ++- test/fixtures/all_examples/test_outputs.tf | 20 +- test/fixtures/deploy_service/example.tf | 15 +- test/fixtures/deploy_service/network.tf | 9 +- test/fixtures/disable_client_cert/example.tf | 19 +- test/fixtures/disable_client_cert/network.tf | 10 +- test/fixtures/node_pool/example.tf | 17 +- test/fixtures/node_pool/network.tf | 9 +- test/fixtures/shared/outputs.tf | 29 +- test/fixtures/shared/variables.tf | 3 +- test/fixtures/shared_vpc/example.tf | 17 +- test/fixtures/shared_vpc/network.tf | 9 +- test/fixtures/simple_regional/example.tf | 15 +- test/fixtures/simple_regional/network.tf | 9 +- .../simple_regional_private/example.tf | 15 +- .../simple_regional_private/network.tf | 13 +- test/fixtures/simple_zonal/example.tf | 15 +- test/fixtures/simple_zonal/network.tf | 9 +- test/fixtures/simple_zonal_private/example.tf | 17 +- test/fixtures/simple_zonal_private/network.tf | 13 +- test/fixtures/stub_domains/example.tf | 15 +- test/fixtures/stub_domains/network.tf | 9 +- test/fixtures/stub_domains_private/main.tf | 25 +- .../example.tf | 15 +- .../network.tf | 7 +- .../versions.tf | 4 + test/fixtures/upstream_nameservers/example.tf | 15 +- test/fixtures/upstream_nameservers/network.tf | 7 +- .../fixtures/upstream_nameservers/versions.tf | 4 + .../workload_metadata_config/example.tf | 16 +- .../workload_metadata_config/network.tf | 10 +- .../workload_metadata_config/versions.tf | 19 ++ variables.tf | 107 ++++--- versions.tf | 19 ++ 147 files changed, 4385 insertions(+), 2613 deletions(-) create mode 100644 autogen/versions.tf create mode 100644 examples/simple_regional_beta/versions.tf create mode 100644 examples/simple_regional_private_beta/versions.tf create mode 100644 examples/stub_domains_upstream_nameservers/versions.tf create mode 100644 
examples/upstream_nameservers/versions.tf create mode 100644 examples/workload_metadata_config/versions.tf create mode 100644 modules/beta-private-cluster/versions.tf create mode 100644 modules/beta-public-cluster/versions.tf create mode 100644 modules/private-cluster/versions.tf create mode 100644 test/fixtures/stub_domains_upstream_nameservers/versions.tf create mode 100644 test/fixtures/upstream_nameservers/versions.tf create mode 100644 test/fixtures/workload_metadata_config/versions.tf create mode 100644 versions.tf diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f1269c5f0..f0948c4ef1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,10 @@ Extending the adopted spec, each change should have a link to its corresponding ## [Unreleased] +### Changed + +* Supported version of Terraform is 0.12. [#58] + ## [v3.0.0] - 2019-07-08 ### Added * Add configuration flag for enable BinAuthZ Admission controller [#160] [#188] * Add configuration flag for `pod_security_policy_config` [#163] [#188] * Support for a guest accelerator in node pool configuration. [#197] * Support to scale the default node cluster. [#149] * Support for configuring the network policy provider. [#159] * Support for database encryption. [#165] diff --git a/Gemfile b/Gemfile index 2fffe26f1f..a54d14ec29 100644 --- a/Gemfile +++ b/Gemfile @@ -15,7 +15,7 @@ ruby "~> 2.5" source 'https://rubygems.org/' do - gem "kitchen-terraform", "~> 4.0" + gem "kitchen-terraform", "~> 4.9" gem "kubeclient", "~> 4.0" gem "rest-client", "~> 2.0" end diff --git a/Makefile b/Makefile index ccf08a8d6d..27ecbaf396 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ SHELL := /usr/bin/env bash # Docker build config variables CREDENTIALS_PATH ?= /cft/workdir/credentials.json DOCKER_ORG := gcr.io/cloud-foundation-cicd -DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 1.3.0 +DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.0.0 DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} DOCKER_TAG_KITCHEN_TERRAFORM ?= ${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} DOCKER_IMAGE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform_terraform-google-kubernetes-engine diff --git a/README.md b/README.md index f175e8d507..ec8cb68ce8 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ module "gke" { all = {} default-node-pool = { - default-node-pool = "true" + default-node-pool = true } } @@ -74,7 +74,7 @@ module "gke" { default-node-pool = [ { key = "default-node-pool" - value = "true" + value = true effect = "PREFER_NO_SCHEDULE" }, ] @@ -114,78 +114,6 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. [^]: (autogen_docs_start) - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|:----:|:-----:|:-----:| -| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | -| basic\_auth\_username | The username to be used with Basic Authentication.
An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | -| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | -| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | -| description | The description of the cluster | string | `""` | no | -| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | string | `"true"` | no | -| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | string | `"true"` | no | -| http\_load\_balancing | Enable httpload balancer addon | string | `"true"` | no | -| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | string | `"0"` | no | -| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | string | `"false"` | no | -| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | -| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | -| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | -| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | string | `"false"` | no | -| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | -| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | -| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | -| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)

### example format ### master_authorized_networks_config = [{ cidr_blocks = [{ cidr_block = "10.0.0.0/8" display_name = "example_network" }], }] | list | `` | no | -| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | -| name | The name of the cluster (required) | string | n/a | yes | -| network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | string | `"false"` | no | -| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | -| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_pools | List of maps containing node pools | list | `` | no | -| node\_pools\_labels | Map of maps containing node labels by node-pool name | map | `` | no | -| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map | `` | no | -| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map | `` | no | -| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | map | `` | no | -| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | -| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no | -| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | -| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | string | `"true"` | no | -| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | string | `"false"` | no | -| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | -| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no | -| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | -| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | -| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| ca\_certificate | Cluster ca certificate (base64 encoded) | -| endpoint | Cluster endpoint | -| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | -| http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | -| location | Cluster location (region if regional cluster, zone if zonal cluster) | -| logging\_service | Logging service used | -| master\_authorized\_networks\_config | Networks from which access to master is permitted | -| master\_version | Current master kubernetes version | -| min\_master\_version | Minimum master kubernetes version | -| monitoring\_service | Monitoring service used | -| name | Cluster name | -| network\_policy\_enabled | Whether network policy enabled | -| node\_pools\_names | List of node pools names | -| node\_pools\_versions | List of node pools versions | -| region | Cluster region | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | -| type | Cluster type (regional / zonal) | -| zones | List of zones in which the cluster resides | - [^]: (autogen_docs_end) ## Requirements @@ -203,7 +131,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog #### Kubectl - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform and Plugins -- [Terraform](https://www.terraform.io/downloads.html) 0.11.x +- [Terraform](https://www.terraform.io/downloads.html) 0.12 - [Terraform Provider for GCP][terraform-provider-google] v2.9 ### Configure a Service Account diff --git a/auth.tf b/auth.tf index 5ad4160145..48e7cc6a5f 100644 --- a/auth.tf +++ b/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "google" + provider = google } /****************************************** @@ -29,6 +29,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/autogen/README.md b/autogen/README.md index e43b896ede..e66c920a78 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -70,7 +70,7 @@ module "gke" { all = {} default-node-pool = { - default-node-pool = "true" + default-node-pool = true } } @@ -88,7 +88,7 @@ module "gke" { default-node-pool = [ { key = "default-node-pool" - value = "true" + value = true effect = "PREFER_NO_SCHEDULE" }, ] @@ -145,7 +145,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog #### Kubectl - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform 
and Plugins -- [Terraform](https://www.terraform.io/downloads.html) 0.11.x +- [Terraform](https://www.terraform.io/downloads.html) 0.12 {% if private_cluster or beta_cluster %} - [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 {% else %} diff --git a/autogen/auth.tf b/autogen/auth.tf index a16136fccf..21275cd41e 100644 --- a/autogen/auth.tf +++ b/autogen/auth.tf @@ -20,7 +20,11 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "{% if private_cluster or beta_cluster %}google-beta{% else %}google{% endif %}" + {% if private_cluster or beta_cluster %} + provider = google-beta + {% else %} + provider = google + {% endif %} } /****************************************** @@ -29,6 +33,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/autogen/cluster_regional.tf b/autogen/cluster_regional.tf index 6dcd2e01ed..c4354f92ca 100644 --- a/autogen/cluster_regional.tf +++ b/autogen/cluster_regional.tf @@ -20,78 +20,120 @@ Create regional cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = "{% if private_cluster or beta_cluster %}google-beta{% else %}google{% endif %}" - count = "${var.regional ? 1 : 0}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" - - region = "${var.region}" - node_locations = ["${coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" - - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_regional}" - - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" - - {% if beta_cluster %} - enable_binary_authorization = "${var.enable_binary_authorization}" - pod_security_policy_config = "${var.pod_security_policy_config}" + {% if private_cluster or beta_cluster %} + provider = google-beta + {% else %} + provider = google {% endif %} - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + + count = var.regional ? 
1 : 0 + name = var.name + description = var.description + project = var.project_id + + region = var.region + + node_locations = coalescelist( + compact(var.zones), + sort(random_shuffle.available_zones.result), + ) + + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_regional + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + +{% if beta_cluster %} + enable_binary_authorization = var.enable_binary_authorization + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + +{% endif %} + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } {% if beta_cluster %} istio_config { - disabled = "${var.istio ? 0 : 1}" + disabled = ! var.istio } - cloudrun_config = "${local.cluster_cloudrun_config["${var.cloudrun ? "enabled" : "disabled"}"]}" + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } {% endif %} } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -102,28 +144,42 @@ resource "google_container_cluster" "primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) {% if beta_cluster %} - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? 
"unspecified" : "specified"}"]}" + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } {% endif %} } } -{% if private_cluster %} +{% if private_cluster %} private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } {% endif %} - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool {% if beta_cluster %} - database_encryption = ["${var.database_encryption}"] + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } {% endif %} } @@ -131,55 +187,115 @@ resource "google_container_cluster" "primary" { Create regional node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = "google-beta" - count = "${var.regional ? length(var.node_pools) : 0}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - region = "${var.region}" - cluster = "${google_container_cluster.primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_regional)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? length(var.node_pools) : 0 + name = var.node_pools[count.index]["name"] + project = var.project_id + region = var.region + cluster = google_container_cluster.primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_regional, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", true) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] - - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } } {% if beta_cluster %} - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } {% endif %} } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -190,16 +306,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/autogen/cluster_zonal.tf b/autogen/cluster_zonal.tf index 24ed5671e6..9f3d6c4273 100644 --- a/autogen/cluster_zonal.tf +++ b/autogen/cluster_zonal.tf @@ -20,79 +20,115 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "{% if private_cluster or beta_cluster %}google-beta{% else %}google{% endif %}" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + {% if private_cluster or beta_cluster %} + provider = google-beta + {% else %} + provider = google + {% endif %} - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
0 : 1 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + dynamic "network_policy" { + for_each = local.cluster_network_policy - {% if beta_cluster %} - enable_binary_authorization = "${var.enable_binary_authorization}" - pod_security_policy_config = "${var.pod_security_policy_config}" + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } - {% endif %} - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + +{% if beta_cluster %} + enable_binary_authorization = var.enable_binary_authorization + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + +{% endif %} + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } {% if beta_cluster %} istio_config { - disabled = "${var.istio ? 0 : 1}" + disabled = ! var.istio } - cloudrun_config = "${local.cluster_cloudrun_config["${var.cloudrun ? 
"enabled" : "disabled"}"]}" + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } {% endif %} } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -103,28 +139,42 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) {% if beta_cluster %} - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } {% endif %} } } -{% if private_cluster %} +{% if private_cluster %} private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } {% endif %} - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool {% if beta_cluster %} - database_encryption = ["${var.database_encryption}"] + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } {% endif %} } @@ -132,55 +182,116 @@ resource "google_container_cluster" "zonal_primary" { Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] - - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + + tags = concat( + ["gke-${var.name}"], + 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } } {% if beta_cluster %} - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } {% endif %} } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -191,16 +302,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/autogen/dns.tf b/autogen/dns.tf index 24a3f34844..65c8d99d65 100644 --- a/autogen/dns.tf +++ b/autogen/dns.tf @@ -20,73 +20,94 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config && !local.upstream_nameservers_config ? 
1 : 0}" + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + + cluster_type = var.regional ? "regional" : "zonal" + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] - cluster_cloudrun_config = { - enabled = [{disabled = "false"}] - disabled = [] - } {% if beta_cluster %} + cluster_cloudrun_config = var.cloudrun ? [{disabled = false}] : [] - cluster_node_metadata_config = { - specified = [{node_metadata = "${var.node_metadata}"}] - unspecified = [] - } + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] {% endif %} cluster_type_output_name = { - regional = "${element(concat(google_container_cluster.primary.*.name, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.name, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.name, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.name, [""]), + 0, + ) } cluster_type_output_location = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.zone, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.zone, [""]), + 0, + ) } cluster_type_output_region = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${var.region}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = var.region } - cluster_type_output_regional_zones = "${flatten(google_container_cluster.primary.*.node_locations)}" - cluster_type_output_zonal_zones = "${slice(var.zones, 1, length(var.zones))}" + cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) + cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) cluster_type_output_zones = { - regional = "${local.cluster_type_output_regional_zones}" - zonal = "${concat(google_container_cluster.zonal_primary.*.zone, local.cluster_type_output_zonal_zones)}" + regional = local.cluster_type_output_regional_zones + zonal = concat( + google_container_cluster.zonal_primary.*.zone, + local.cluster_type_output_zonal_zones, + ) } {% if private_cluster %} cluster_type_output_endpoint = { - regional = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.primary.*.endpoint, list("")), 0) - }" - - zonal = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0) - }" + regional = var.deploy_using_private_endpoint ? 
diff --git a/autogen/main.tf b/autogen/main.tf --- a/autogen/main.tf +++ b/autogen/main.tf @@ -17,30 +17,39 @@ locals { - custom_kube_dns_config = "${length(keys(var.stub_domains)) > 0 ? true : false}" - upstream_nameservers_config = "${length(var.upstream_nameservers) > 0 ? true : false}" + custom_kube_dns_config = length(var.stub_domains) > 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + + cluster_type = var.regional ? "regional" : "zonal" + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] - cluster_cloudrun_config = { - enabled = [{disabled = "false"}] - disabled = [] - } {% if beta_cluster %} + cluster_cloudrun_config = var.cloudrun ? [{disabled = false}] : [] - cluster_node_metadata_config = { - specified = [{node_metadata = "${var.node_metadata}"}] - unspecified = [] - } + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] {% endif %} cluster_type_output_name = { - regional = "${element(concat(google_container_cluster.primary.*.name, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.name, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.name, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.name, [""]), + 0, + ) } cluster_type_output_location = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.zone, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.zone, [""]), + 0, + ) } cluster_type_output_region = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${var.region}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = var.region } - cluster_type_output_regional_zones = "${flatten(google_container_cluster.primary.*.node_locations)}" - cluster_type_output_zonal_zones = "${slice(var.zones, 1, length(var.zones))}" + cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) + cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) cluster_type_output_zones = { - regional = "${local.cluster_type_output_regional_zones}" - zonal = "${concat(google_container_cluster.zonal_primary.*.zone, local.cluster_type_output_zonal_zones)}" + regional = local.cluster_type_output_regional_zones + zonal = concat( + google_container_cluster.zonal_primary.*.zone, + local.cluster_type_output_zonal_zones, + ) } {% if private_cluster %} cluster_type_output_endpoint = { - regional = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.primary.*.endpoint, list("")), 0) - }" - - zonal = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0) - }" + regional = var.deploy_using_private_endpoint ? element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, [""]), 0) : element(concat(google_container_cluster.primary.*.endpoint, [""]), 0) + + zonal = var.deploy_using_private_endpoint ? element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, [""]), 0) : element(concat(google_container_cluster.zonal_primary.*.endpoint, [""]), 0) } {% else %} cluster_type_output_endpoint = { - regional = "${element(concat(google_container_cluster.primary.*.endpoint, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.endpoint, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.endpoint, [""]), + 0, + ) } {% endif %} cluster_type_output_master_auth = { - regional = "${concat(google_container_cluster.primary.*.master_auth, list())}" - zonal = "${concat(google_container_cluster.zonal_primary.*.master_auth, list())}" + regional = concat(google_container_cluster.primary.*.master_auth, []) + zonal = concat(google_container_cluster.zonal_primary.*.master_auth, []) } cluster_type_output_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.master_version, + [""], + ), + 0, + ) } cluster_type_output_min_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.min_master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.min_master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.min_master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.min_master_version, + [""], + ), + 0, + ) } cluster_type_output_logging_service = { - regional = "${element(concat(google_container_cluster.primary.*.logging_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.logging_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.logging_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.logging_service, + [""], + ), + 0, + ) } cluster_type_output_monitoring_service = { - regional = "${element(concat(google_container_cluster.primary.*.monitoring_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.monitoring_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.monitoring_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.monitoring_service, + [""], + ), + 0, + ) } cluster_type_output_network_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( +
google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_http_load_balancing_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_horizontal_pod_autoscaling_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_kubernetes_dashboard_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) } {% if beta_cluster %} # BETA features cluster_type_output_istio_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.istio_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.istio_config.0.disabled, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.addons_config.0.istio_config.0.disabled, [""]), 0) + zonal = element(concat(google_container_cluster.zonal_primary.*.addons_config.0.istio_config.0.disabled, [""]), 0) } cluster_type_output_pod_security_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.pod_security_policy_config.0.enabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.pod_security_policy_config.0.enabled, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.pod_security_policy_config.0.enabled, [""]), 0) + zonal = element(concat(google_container_cluster.zonal_primary.*.pod_security_policy_config.0.enabled, [""]), 0) } # /BETA features {% endif %} cluster_type_output_node_pools_names = { - regional = "${concat(google_container_node_pool.pools.*.name, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.name, list(""))}" + regional = concat(google_container_node_pool.pools.*.name, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.name, [""]) } cluster_type_output_node_pools_versions = { - regional = "${concat(google_container_node_pool.pools.*.version, list(""))}" - zonal = 
"${concat(google_container_node_pool.zonal_pools.*.version, list(""))}" + regional = concat(google_container_node_pool.pools.*.version, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.version, [""]) } - cluster_master_auth_list_layer1 = "${local.cluster_type_output_master_auth[local.cluster_type]}" - cluster_master_auth_list_layer2 = "${local.cluster_master_auth_list_layer1[0]}" - cluster_master_auth_map = "${local.cluster_master_auth_list_layer2[0]}" + cluster_master_auth_list_layer1 = local.cluster_type_output_master_auth[local.cluster_type] + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] # cluster locals - cluster_name = "${local.cluster_type_output_name[local.cluster_type]}" - cluster_location = "${local.cluster_type_output_location[local.cluster_type]}" - cluster_region = "${local.cluster_type_output_region[local.cluster_type]}" - cluster_zones = "${sort(local.cluster_type_output_zones[local.cluster_type])}" - cluster_endpoint = "${local.cluster_type_output_endpoint[local.cluster_type]}" - cluster_ca_certificate = "${lookup(local.cluster_master_auth_map, "cluster_ca_certificate")}" - cluster_master_version = "${local.cluster_type_output_master_version[local.cluster_type]}" - cluster_min_master_version = "${local.cluster_type_output_min_master_version[local.cluster_type]}" - cluster_logging_service = "${local.cluster_type_output_logging_service[local.cluster_type]}" - cluster_monitoring_service = "${local.cluster_type_output_monitoring_service[local.cluster_type]}" - cluster_node_pools_names = "${local.cluster_type_output_node_pools_names[local.cluster_type]}" - cluster_node_pools_versions = "${local.cluster_type_output_node_pools_versions[local.cluster_type]}" - cluster_network_policy_enabled = "${local.cluster_type_output_network_policy_enabled[local.cluster_type] ? false : true}" - cluster_http_load_balancing_enabled = "${local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] ? false : true}" - cluster_horizontal_pod_autoscaling_enabled = "${local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] ? false : true}" - cluster_kubernetes_dashboard_enabled = "${local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] ? 
false : true}" + cluster_name = local.cluster_type_output_name[local.cluster_type] + cluster_location = local.cluster_type_output_location[local.cluster_type] + cluster_region = local.cluster_type_output_region[local.cluster_type] + cluster_zones = sort(local.cluster_type_output_zones[local.cluster_type]) + cluster_endpoint = local.cluster_type_output_endpoint[local.cluster_type] + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_type_output_master_version[local.cluster_type] + cluster_min_master_version = local.cluster_type_output_min_master_version[local.cluster_type] + cluster_logging_service = local.cluster_type_output_logging_service[local.cluster_type] + cluster_monitoring_service = local.cluster_type_output_monitoring_service[local.cluster_type] + cluster_node_pools_names = local.cluster_type_output_node_pools_names[local.cluster_type] + cluster_node_pools_versions = local.cluster_type_output_node_pools_versions[local.cluster_type] + cluster_network_policy_enabled = !local.cluster_type_output_network_policy_enabled[local.cluster_type] + cluster_http_load_balancing_enabled = !local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] + cluster_horizontal_pod_autoscaling_enabled = !local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] + cluster_kubernetes_dashboard_enabled = !local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] {% if beta_cluster %} # BETA features - cluster_istio_enabled = "${local.cluster_type_output_istio_enabled[local.cluster_type] ? false : true}" - cluster_cloudrun_enabled = "${var.cloudrun}" - cluster_pod_security_policy_enabled = "${local.cluster_type_output_pod_security_policy_enabled[local.cluster_type] ? true : false}" + cluster_istio_enabled = !local.cluster_type_output_istio_enabled[local.cluster_type] + cluster_cloudrun_enabled = var.cloudrun + cluster_pod_security_policy_enabled = local.cluster_type_output_pod_security_policy_enabled[local.cluster_type] # /BETA features {% endif %} } @@ -207,9 +296,9 @@ locals { Get available container engine versions *****************************************/ data "google_container_engine_versions" "region" { - provider = "google-beta" - region = "${var.region}" - project = "${var.project_id}" + provider = google-beta + region = var.region + project = var.project_id } data "google_container_engine_versions" "zone" { @@ -217,7 +306,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = "${var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0]}" + zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] - project = "${var.project_id}" + project = var.project_id } diff --git a/autogen/masq.tf b/autogen/masq.tf index 83aa443a3e..afd34e148d 100644 --- a/autogen/masq.tf +++ b/autogen/masq.tf @@ -20,18 +20,18 @@ Create ip-masq-agent confimap *****************************************/ resource "kubernetes_config_map" "ip-masq-agent" { - count = "${var.configure_ip_masq ? 1 : 0}" + count = var.configure_ip_masq ? 1 : 0 metadata { name = "ip-masq-agent" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { config = < 0 ? 
[{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -159,16 +239,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/cluster_zonal.tf b/cluster_zonal.tf index 466b81634d..13cdb99ace 100644 --- a/cluster_zonal.tf +++ b/cluster_zonal.tf @@ -20,66 +20,86 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "google" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
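# Two 0.12 changes visible just above, reduced to a sketch with illustrative
# null_resources: depends_on now takes bare resource references instead of
# "type.name" strings, and a provisioner's `when` takes the bare keyword.
resource "null_resource" "first" {
}

resource "null_resource" "second" {
  depends_on = [null_resource.first]   # 0.11: depends_on = ["null_resource.first"]

  provisioner "local-exec" {
    when    = destroy                  # 0.11: when = "destroy"
    command = "echo cleanup"
  }
}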
0 : 1 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + dynamic "network_policy" { + for_each = local.cluster_network_policy - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -90,65 +110,121 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } - remove_default_node_pool = "${var.remove_default_node_pool}" + + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 
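# The nested dynamic pair above follows 0.12's iterator-naming rule: each
# dynamic block's iterator defaults to the block's own label, so the outer
# body reads master_authorized_networks_config.value while the inner body
# reads cidr_blocks.value. Abridged excerpt of the resource above (not a
# standalone configuration):
#
#   dynamic "master_authorized_networks_config" {
#     for_each = var.master_authorized_networks_config
#     content {
#       dynamic "cidr_blocks" {
#         for_each = master_authorized_networks_config.value.cidr_blocks
#         content {
#           cidr_block   = lookup(cidr_blocks.value, "cidr_block", "")
#           display_name = lookup(cidr_blocks.value, "display_name", "")
#         }
#       }
#     }
#   }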
0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] - - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + 
"cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -159,16 +235,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/dns.tf b/dns.tf index 91b41efac4..7138473ded 100644 --- a/dns.tf +++ b/dns.tf @@ -20,73 +20,94 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 
1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config && !local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id - cluster_cloudrun_config = { - enabled = [{ - disabled = "false" - }] + cluster_type = var.regional ? "regional" : "zonal" - disabled = [] - } + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] cluster_type_output_name = { - regional = "${element(concat(google_container_cluster.primary.*.name, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.name, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.name, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.name, [""]), + 0, + ) } cluster_type_output_location = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.zone, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.zone, [""]), + 0, + ) } cluster_type_output_region = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${var.region}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = var.region } - cluster_type_output_regional_zones = "${flatten(google_container_cluster.primary.*.node_locations)}" - cluster_type_output_zonal_zones = "${slice(var.zones, 1, length(var.zones))}" + cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) + cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) cluster_type_output_zones = { - regional = "${local.cluster_type_output_regional_zones}" - zonal = "${concat(google_container_cluster.zonal_primary.*.zone, local.cluster_type_output_zonal_zones)}" + regional = local.cluster_type_output_regional_zones + zonal = concat( + google_container_cluster.zonal_primary.*.zone, + local.cluster_type_output_zonal_zones, + ) } cluster_type_output_endpoint = { - regional = "${element(concat(google_container_cluster.primary.*.endpoint, list("")), 0)}" - zonal = 
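# The cluster_network_policy local above keeps `provider = null` in its
# disabled arm because 0.12 requires both results of a conditional to have
# compatible types. A reduced sketch (variable and values illustrative):
variable "network_policy" {
  type    = bool
  default = false
}

locals {
  network_policy_config = var.network_policy ? [{
    enabled  = true
    provider = "CALICO"
  }] : [{
    enabled  = false
    provider = null   # omitting this key would make the two arms type-incompatible
  }]
}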
"${element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.endpoint, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.endpoint, [""]), + 0, + ) } cluster_type_output_master_auth = { - regional = "${concat(google_container_cluster.primary.*.master_auth, list())}" - zonal = "${concat(google_container_cluster.zonal_primary.*.master_auth, list())}" + regional = concat(google_container_cluster.primary.*.master_auth, []) + zonal = concat(google_container_cluster.zonal_primary.*.master_auth, []) } cluster_type_output_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.master_version, + [""], + ), + 0, + ) } cluster_type_output_min_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.min_master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.min_master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.min_master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.min_master_version, + [""], + ), + 0, + ) } cluster_type_output_logging_service = { - regional = "${element(concat(google_container_cluster.primary.*.logging_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.logging_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.logging_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.logging_service, + [""], + ), + 0, + ) } cluster_type_output_monitoring_service = { - regional = "${element(concat(google_container_cluster.primary.*.monitoring_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.monitoring_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.monitoring_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.monitoring_service, + [""], + ), + 0, + ) } cluster_type_output_network_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_http_load_balancing_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + 
google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_horizontal_pod_autoscaling_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_kubernetes_dashboard_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) } + cluster_type_output_node_pools_names = { - regional = "${concat(google_container_node_pool.pools.*.name, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.name, list(""))}" + regional = concat(google_container_node_pool.pools.*.name, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.name, [""]) } cluster_type_output_node_pools_versions = { - regional = "${concat(google_container_node_pool.pools.*.version, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.version, list(""))}" + regional = concat(google_container_node_pool.pools.*.version, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.version, [""]) } - cluster_master_auth_list_layer1 = "${local.cluster_type_output_master_auth[local.cluster_type]}" - cluster_master_auth_list_layer2 = "${local.cluster_master_auth_list_layer1[0]}" - cluster_master_auth_map = "${local.cluster_master_auth_list_layer2[0]}" - + cluster_master_auth_list_layer1 = local.cluster_type_output_master_auth[local.cluster_type] + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] # cluster locals - cluster_name = "${local.cluster_type_output_name[local.cluster_type]}" - cluster_location = "${local.cluster_type_output_location[local.cluster_type]}" - cluster_region = "${local.cluster_type_output_region[local.cluster_type]}" - cluster_zones = "${sort(local.cluster_type_output_zones[local.cluster_type])}" - cluster_endpoint = "${local.cluster_type_output_endpoint[local.cluster_type]}" - cluster_ca_certificate = "${lookup(local.cluster_master_auth_map, "cluster_ca_certificate")}" - cluster_master_version = "${local.cluster_type_output_master_version[local.cluster_type]}" - cluster_min_master_version = "${local.cluster_type_output_min_master_version[local.cluster_type]}" - cluster_logging_service = "${local.cluster_type_output_logging_service[local.cluster_type]}" - cluster_monitoring_service = "${local.cluster_type_output_monitoring_service[local.cluster_type]}" - cluster_node_pools_names = "${local.cluster_type_output_node_pools_names[local.cluster_type]}" - 
cluster_node_pools_versions = "${local.cluster_type_output_node_pools_versions[local.cluster_type]}" - cluster_network_policy_enabled = "${local.cluster_type_output_network_policy_enabled[local.cluster_type] ? false : true}" - cluster_http_load_balancing_enabled = "${local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] ? false : true}" - cluster_horizontal_pod_autoscaling_enabled = "${local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] ? false : true}" - cluster_kubernetes_dashboard_enabled = "${local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] ? false : true}" + cluster_name = local.cluster_type_output_name[local.cluster_type] + cluster_location = local.cluster_type_output_location[local.cluster_type] + cluster_region = local.cluster_type_output_region[local.cluster_type] + cluster_zones = sort(local.cluster_type_output_zones[local.cluster_type]) + cluster_endpoint = local.cluster_type_output_endpoint[local.cluster_type] + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_type_output_master_version[local.cluster_type] + cluster_min_master_version = local.cluster_type_output_min_master_version[local.cluster_type] + cluster_logging_service = local.cluster_type_output_logging_service[local.cluster_type] + cluster_monitoring_service = local.cluster_type_output_monitoring_service[local.cluster_type] + cluster_node_pools_names = local.cluster_type_output_node_pools_names[local.cluster_type] + cluster_node_pools_versions = local.cluster_type_output_node_pools_versions[local.cluster_type] + cluster_network_policy_enabled = ! local.cluster_type_output_network_policy_enabled[local.cluster_type] + cluster_http_load_balancing_enabled = ! local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] + cluster_kubernetes_dashboard_enabled = ! local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] } /****************************************** Get available container engine versions *****************************************/ data "google_container_engine_versions" "region" { - provider = "google-beta" - region = "${var.region}" - project = "${var.project_id}" + provider = google-beta + region = var.region + project = var.project_id } data "google_container_engine_versions" "zone" { @@ -180,7 +266,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = "${var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0]}" + zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] - project = "${var.project_id}" + project = var.project_id } diff --git a/masq.tf b/masq.tf index 3006578627..1e9dc7791d 100644 --- a/masq.tf +++ b/masq.tf @@ -20,18 +20,18 @@ Create ip-masq-agent confimap *****************************************/ resource "kubernetes_config_map" "ip-masq-agent" { - count = "${var.configure_ip_masq ? 1 : 0}" + count = var.configure_ip_masq ? 
1 : 0 metadata { name = "ip-masq-agent" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { config = <` | no | -| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | string | `"false"` | no | -| description | The description of the cluster | string | `""` | no | -| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | string | `"true"` | no | -| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | -| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | string | `"false"` | no | -| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | string | `"false"` | no | -| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | string | `"true"` | no | -| http\_load\_balancing | Enable httpload balancer addon | string | `"true"` | no | -| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | string | `"0"` | no | -| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | string | `"false"` | no | -| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | -| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | -| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | -| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | string | `"false"` | no | -| istio | (Beta) Enable Istio addon | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | string | `"false"` | no | -| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | -| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | -| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | -| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)
### example format ### master_authorized_networks_config = [{ cidr_blocks = [{ cidr_block = "10.0.0.0/8" display_name = "example_network" }], }] | list | `` | no | -| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | -| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | -| name | The name of the cluster (required) | string | n/a | yes | -| network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | string | `"false"` | no | -| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | -| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | -| node\_pools | List of maps containing node pools | list | `` | no | -| node\_pools\_labels | Map of maps containing node labels by node-pool name | map | `` | no | -| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map | `` | no | -| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map | `` | no | -| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | map | `` | no | -| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | -| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no | -| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | -| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | -| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | string | `"true"` | no | -| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | string | `"false"` | no | -| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | -| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no | -| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | -| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | -| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| ca\_certificate | Cluster ca certificate (base64 encoded) | -| cloudrun\_enabled | Whether CloudRun enabled | -| endpoint | Cluster endpoint | -| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | -| http\_load\_balancing\_enabled | Whether http load balancing enabled | -| istio\_enabled | Whether Istio is enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | -| location | Cluster location (region if regional cluster, zone if zonal cluster) | -| logging\_service | Logging service used | -| master\_authorized\_networks\_config | Networks from which access to master is permitted | -| master\_version | Current master kubernetes version | -| min\_master\_version | Minimum master kubernetes version | -| monitoring\_service | Monitoring service used | -| name | Cluster name | -| network\_policy\_enabled | Whether network policy enabled | -| node\_pools\_names | List of node pools names | -| node\_pools\_versions | List of node pools versions | -| pod\_security\_policy\_enabled | Whether pod security policy is enabled | -| region | Cluster region | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. 
| -| type | Cluster type (regional / zonal) | -| zones | List of zones in which the cluster resides | - [^]: (autogen_docs_end) ## Requirements @@ -223,7 +138,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog #### Kubectl - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform and Plugins -- [Terraform](https://www.terraform.io/downloads.html) 0.11.x +- [Terraform](https://www.terraform.io/downloads.html) 0.12 - [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 ### Configure a Service Account diff --git a/modules/beta-private-cluster/auth.tf b/modules/beta-private-cluster/auth.tf index 0bbafaf4a2..c177eee5a7 100644 --- a/modules/beta-private-cluster/auth.tf +++ b/modules/beta-private-cluster/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "google-beta" + provider = google-beta } /****************************************** @@ -29,6 +29,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/modules/beta-private-cluster/cluster_regional.tf b/modules/beta-private-cluster/cluster_regional.tf index 4142486488..d26d9d9df9 100644 --- a/modules/beta-private-cluster/cluster_regional.tf +++ b/modules/beta-private-cluster/cluster_regional.tf @@ -20,74 +20,112 @@ Create regional cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = "google-beta" - count = "${var.regional ? 1 : 0}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta - region = "${var.region}" - node_locations = ["${coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
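# `provider = google-beta` above is the 0.12 form of the provider
# meta-argument: a bare provider reference rather than a quoted string.
# Minimal sketch:
provider "google-beta" {
  # project/region/credentials supplied via environment in this sketch
}

data "google_client_config" "default" {
  provider = google-beta   # 0.11: provider = "google-beta"
}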
1 : 0 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_regional}" + region = var.region - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + node_locations = coalescelist( + compact(var.zones), + sort(random_shuffle.available_zones.result), + ) - enable_binary_authorization = "${var.enable_binary_authorization}" - pod_security_policy_config = "${var.pod_security_policy_config}" - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_regional + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } istio_config { - disabled = "${var.istio ? 0 : 1}" + disabled = ! var.istio } - cloudrun_config = "${local.cluster_cloudrun_config["${var.cloudrun ? 
"enabled" : "disabled"}"]}" + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -98,76 +136,150 @@ resource "google_container_cluster" "primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config + + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } } } private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } - remove_default_node_pool = "${var.remove_default_node_pool}" - database_encryption = ["${var.database_encryption}"] + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } } /****************************************** Create regional node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = "google-beta" - count = "${var.regional ? length(var.node_pools) : 0}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - region = "${var.region}" - cluster = "${google_container_cluster.primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_regional)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? length(var.node_pools) : 0 + name = var.node_pools[count.index]["name"] + project = var.project_id + region = var.region + cluster = google_container_cluster.primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_regional, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", true) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + 
var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } + } - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } } - - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -178,16 +290,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/modules/beta-private-cluster/cluster_zonal.tf b/modules/beta-private-cluster/cluster_zonal.tf index 9df66bfbc1..afbc294f0a 100644 --- a/modules/beta-private-cluster/cluster_zonal.tf +++ b/modules/beta-private-cluster/cluster_zonal.tf @@ -20,75 +20,107 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "google-beta" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
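# The guest_accelerator dynamic above relies on for_each over an empty list to
# omit the block entirely when no accelerator is requested. The same trick on
# a simpler resource (firewall rule and port are illustrative, not from the
# module):
variable "ssh_enabled" {
  type    = bool
  default = true
}

resource "google_compute_firewall" "example" {
  name    = "example-allow"
  network = "default"

  dynamic "allow" {
    for_each = var.ssh_enabled ? ["22"] : []   # empty list => no allow block
    content {
      protocol = "tcp"
      ports    = [allow.value]
    }
  }
}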
0 : 1 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + dynamic "network_policy" { + for_each = local.cluster_network_policy - enable_binary_authorization = "${var.enable_binary_authorization}" - pod_security_policy_config = "${var.pod_security_policy_config}" + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } istio_config { - disabled = "${var.istio ? 0 : 1}" + disabled = ! var.istio } - cloudrun_config = "${local.cluster_cloudrun_config["${var.cloudrun ? 
"enabled" : "disabled"}"]}" + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -99,76 +131,151 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } } } private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } - remove_default_node_pool = "${var.remove_default_node_pool}" - database_encryption = ["${var.database_encryption}"] + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } } /****************************************** Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + 
var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } + } - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } } - - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -179,16 +286,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/modules/beta-private-cluster/dns.tf b/modules/beta-private-cluster/dns.tf index 91b41efac4..7138473ded 100644 --- a/modules/beta-private-cluster/dns.tf +++ b/modules/beta-private-cluster/dns.tf @@ -20,73 +20,94 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config && !local.upstream_nameservers_config ? 
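# Editor's note: the `dynamic "guest_accelerator"` rewrite above builds a
# single-element list only when an accelerator count is requested, so the
# block disappears entirely for pools without GPUs. A hedged sketch;
# `local.pool` is a hypothetical stand-in for one entry of var.node_pools,
# which carries string values in this module.
locals {
  pool = {
    name              = "gpu-pool"
    accelerator_type  = "nvidia-tesla-k80"
    accelerator_count = "1"
  }

  guest_accelerator = lookup(local.pool, "accelerator_count", 0) > 0 ? [{
    type  = lookup(local.pool, "accelerator_type", "")
    count = lookup(local.pool, "accelerator_count", 0)
  }] : []
}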
1 : 0}" + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + + cluster_type = var.regional ? "regional" : "zonal" + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + cluster_cloudrun_config = var.cloudrun ? [{ disabled = false }] : [] + + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] cluster_type_output_name = { - regional = "${element(concat(google_container_cluster.primary.*.name, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.name, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.name, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.name, [""]), + 0, + ) } cluster_type_output_location = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.zone, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.zone, [""]), + 0, + ) } cluster_type_output_region = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${var.region}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = var.region } - cluster_type_output_regional_zones = "${flatten(google_container_cluster.primary.*.node_locations)}" - cluster_type_output_zonal_zones = "${slice(var.zones, 1, length(var.zones))}" + cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) + cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) cluster_type_output_zones = { - regional = "${local.cluster_type_output_regional_zones}" - zonal = "${concat(google_container_cluster.zonal_primary.*.zone, local.cluster_type_output_zonal_zones)}" + regional = local.cluster_type_output_regional_zones + zonal = concat( + google_container_cluster.zonal_primary.*.zone, + local.cluster_type_output_zonal_zones, + ) } cluster_type_output_endpoint = { - regional = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.primary.*.endpoint, list("")), 0) - }" + regional = var.deploy_using_private_endpoint ? element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, [""]), 0) : element(concat(google_container_cluster.primary.*.endpoint, [""]), 0) - zonal = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0) - }" + zonal = var.deploy_using_private_endpoint ? 
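# Editor's note: feature toggles like the one above become plain boolean
# locals in 0.12 rather than interpolated strings. A trimmed sketch of the
# stub-domains gate; the null_resource stands in for the real provisioner.
variable "stub_domains" {
  type    = map(list(string))
  default = {}
}

locals {
  custom_kube_dns_config = length(var.stub_domains) > 0
}

resource "null_resource" "delete_default_kube_dns_configmap" {
  # created only when a custom kube-dns config is supplied
  count = local.custom_kube_dns_config ? 1 : 0
}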
element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, [""]), 0) : element(concat(google_container_cluster.zonal_primary.*.endpoint, [""]), 0) } cluster_type_output_master_auth = { - regional = "${concat(google_container_cluster.primary.*.master_auth, list())}" - zonal = "${concat(google_container_cluster.zonal_primary.*.master_auth, list())}" + regional = concat(google_container_cluster.primary.*.master_auth, []) + zonal = concat(google_container_cluster.zonal_primary.*.master_auth, []) } cluster_type_output_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.master_version, + [""], + ), + 0, + ) } cluster_type_output_min_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.min_master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.min_master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.min_master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.min_master_version, + [""], + ), + 0, + ) } cluster_type_output_logging_service = { - regional = "${element(concat(google_container_cluster.primary.*.logging_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.logging_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.logging_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.logging_service, + [""], + ), + 0, + ) } cluster_type_output_monitoring_service = { - regional = "${element(concat(google_container_cluster.primary.*.monitoring_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.monitoring_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.monitoring_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.monitoring_service, + [""], + ), + 0, + ) } cluster_type_output_network_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_http_load_balancing_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, + 
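# Editor's note: the outputs above read attributes from clusters that may
# have count = 0, so every access is wrapped in
# element(concat(<splat>, [""]), 0) to fall back to an empty string instead
# of failing. Stand-alone sketch with a null_resource as the counted
# resource:
variable "regional" {
  type    = bool
  default = true
}

resource "null_resource" "zonal_primary" {
  count = var.regional ? 0 : 1
}

locals {
  # "" when the zonal cluster is not created, its id otherwise
  zonal_primary_id = element(concat(null_resource.zonal_primary.*.id, [""]), 0)
}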
[""], + ), + 0, + ) } cluster_type_output_horizontal_pod_autoscaling_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_kubernetes_dashboard_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) } # BETA features cluster_type_output_istio_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.istio_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.istio_config.0.disabled, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.addons_config.0.istio_config.0.disabled, [""]), 0) + zonal = element(concat(google_container_cluster.zonal_primary.*.addons_config.0.istio_config.0.disabled, [""]), 0) } cluster_type_output_pod_security_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.pod_security_policy_config.0.enabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.pod_security_policy_config.0.enabled, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.pod_security_policy_config.0.enabled, [""]), 0) + zonal = element(concat(google_container_cluster.zonal_primary.*.pod_security_policy_config.0.enabled, [""]), 0) } - # /BETA features cluster_type_output_node_pools_names = { - regional = "${concat(google_container_node_pool.pools.*.name, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.name, list(""))}" + regional = concat(google_container_node_pool.pools.*.name, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.name, [""]) } + cluster_type_output_node_pools_versions = { - regional = "${concat(google_container_node_pool.pools.*.version, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.version, list(""))}" + regional = concat(google_container_node_pool.pools.*.version, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.version, [""]) } - cluster_master_auth_list_layer1 = "${local.cluster_type_output_master_auth[local.cluster_type]}" - cluster_master_auth_list_layer2 = "${local.cluster_master_auth_list_layer1[0]}" - cluster_master_auth_map = "${local.cluster_master_auth_list_layer2[0]}" + + cluster_master_auth_list_layer1 = local.cluster_type_output_master_auth[local.cluster_type] + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] # cluster locals - cluster_name = 
"${local.cluster_type_output_name[local.cluster_type]}" - cluster_location = "${local.cluster_type_output_location[local.cluster_type]}" - cluster_region = "${local.cluster_type_output_region[local.cluster_type]}" - cluster_zones = "${sort(local.cluster_type_output_zones[local.cluster_type])}" - cluster_endpoint = "${local.cluster_type_output_endpoint[local.cluster_type]}" - cluster_ca_certificate = "${lookup(local.cluster_master_auth_map, "cluster_ca_certificate")}" - cluster_master_version = "${local.cluster_type_output_master_version[local.cluster_type]}" - cluster_min_master_version = "${local.cluster_type_output_min_master_version[local.cluster_type]}" - cluster_logging_service = "${local.cluster_type_output_logging_service[local.cluster_type]}" - cluster_monitoring_service = "${local.cluster_type_output_monitoring_service[local.cluster_type]}" - cluster_node_pools_names = "${local.cluster_type_output_node_pools_names[local.cluster_type]}" - cluster_node_pools_versions = "${local.cluster_type_output_node_pools_versions[local.cluster_type]}" - cluster_network_policy_enabled = "${local.cluster_type_output_network_policy_enabled[local.cluster_type] ? false : true}" - cluster_http_load_balancing_enabled = "${local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] ? false : true}" - cluster_horizontal_pod_autoscaling_enabled = "${local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] ? false : true}" - cluster_kubernetes_dashboard_enabled = "${local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] ? false : true}" + cluster_name = local.cluster_type_output_name[local.cluster_type] + cluster_location = local.cluster_type_output_location[local.cluster_type] + cluster_region = local.cluster_type_output_region[local.cluster_type] + cluster_zones = sort(local.cluster_type_output_zones[local.cluster_type]) + cluster_endpoint = local.cluster_type_output_endpoint[local.cluster_type] + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_type_output_master_version[local.cluster_type] + cluster_min_master_version = local.cluster_type_output_min_master_version[local.cluster_type] + cluster_logging_service = local.cluster_type_output_logging_service[local.cluster_type] + cluster_monitoring_service = local.cluster_type_output_monitoring_service[local.cluster_type] + cluster_node_pools_names = local.cluster_type_output_node_pools_names[local.cluster_type] + cluster_node_pools_versions = local.cluster_type_output_node_pools_versions[local.cluster_type] + cluster_network_policy_enabled = ! local.cluster_type_output_network_policy_enabled[local.cluster_type] + cluster_http_load_balancing_enabled = ! local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] + cluster_kubernetes_dashboard_enabled = ! local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] # BETA features - cluster_istio_enabled = "${local.cluster_type_output_istio_enabled[local.cluster_type] ? false : true}" - cluster_cloudrun_enabled = "${var.cloudrun}" - cluster_pod_security_policy_enabled = "${local.cluster_type_output_pod_security_policy_enabled[local.cluster_type] ? true : false}" - + cluster_istio_enabled = ! 
local.cluster_type_output_istio_enabled[local.cluster_type] + cluster_cloudrun_enabled = var.cloudrun + cluster_pod_security_policy_enabled = local.cluster_type_output_pod_security_policy_enabled[local.cluster_type] # /BETA features } @@ -203,9 +276,9 @@ locals { Get available container engine versions *****************************************/ data "google_container_engine_versions" "region" { - provider = "google-beta" - region = "${var.region}" - project = "${var.project_id}" + provider = google-beta + region = var.region + project = var.project_id } data "google_container_engine_versions" "zone" { @@ -213,7 +286,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = "${var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0]}" + zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] - project = "${var.project_id}" + project = var.project_id } diff --git a/modules/beta-private-cluster/masq.tf b/modules/beta-private-cluster/masq.tf index 3006578627..1e9dc7791d 100644 --- a/modules/beta-private-cluster/masq.tf +++ b/modules/beta-private-cluster/masq.tf @@ -20,18 +20,18 @@ Create ip-masq-agent confimap *****************************************/ resource "kubernetes_config_map" "ip-masq-agent" { - count = "${var.configure_ip_masq ? 1 : 0}" + count = var.configure_ip_masq ? 1 : 0 metadata { name = "ip-masq-agent" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { config = <` | no | -| description | The description of the cluster | string | `""` | no | -| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | string | `"true"` | no | -| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | -| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | string | `"true"` | no | -| http\_load\_balancing | Enable httpload balancer addon | string | `"true"` | no | -| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | string | `"0"` | no | -| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | string | `"false"` | no | -| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | -| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | -| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | -| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | string | `"false"` | no | -| istio | (Beta) Enable Istio addon | string | `"false"` | no | -| kubernetes\_dashboard | Enable kubernetes dashboard addon | string | `"false"` | no | -| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | -| logging\_service | The logging service that the cluster should write logs to. 
-Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no |
-| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no |
-| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)
-
-### example format ### master_authorized_networks_config = [{ cidr_blocks = [{ cidr_block = "10.0.0.0/8" display_name = "example_network" }], }] | list | `` | no |
-| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no |
-| name | The name of the cluster (required) | string | n/a | yes |
-| network | The VPC network to host the cluster in (required) | string | n/a | yes |
-| network\_policy | Enable network policy addon | string | `"false"` | no |
-| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no |
-| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no |
-| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no |
-| node\_pools | List of maps containing node pools | list | `` | no |
-| node\_pools\_labels | Map of maps containing node labels by node-pool name | map | `` | no |
-| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map | `` | no |
-| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map | `` | no |
-| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map | `` | no |
-| node\_pools\_taints | Map of lists containing node taints by node-pool name | map | `` | no |
-| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no |
-| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no |
-| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no |
-| project\_id | The project ID to host the cluster in (required) | string | n/a | yes |
-| region | The region to host the cluster in (required) | string | n/a | yes |
-| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | string | `"true"` | no |
-| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | string | `"false"` | no |
-| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created.
-| string | `"create"` | no |
-| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no |
-| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes |
-| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no |
-| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| ca\_certificate | Cluster ca certificate (base64 encoded) |
-| cloudrun\_enabled | Whether CloudRun enabled |
-| endpoint | Cluster endpoint |
-| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled |
-| http\_load\_balancing\_enabled | Whether http load balancing enabled |
-| istio\_enabled | Whether Istio is enabled |
-| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled |
-| location | Cluster location (region if regional cluster, zone if zonal cluster) |
-| logging\_service | Logging service used |
-| master\_authorized\_networks\_config | Networks from which access to master is permitted |
-| master\_version | Current master kubernetes version |
-| min\_master\_version | Minimum master kubernetes version |
-| monitoring\_service | Monitoring service used |
-| name | Cluster name |
-| network\_policy\_enabled | Whether network policy enabled |
-| node\_pools\_names | List of node pools names |
-| node\_pools\_versions | List of node pools versions |
-| pod\_security\_policy\_enabled | Whether pod security policy is enabled |
-| region | Cluster region |
-| service\_account | The service account to default running nodes as if not overridden in `node_pools`.
| -| type | Cluster type (regional / zonal) | -| zones | List of zones in which the cluster resides | - [^]: (autogen_docs_end) ## Requirements @@ -214,7 +133,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog #### Kubectl - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform and Plugins -- [Terraform](https://www.terraform.io/downloads.html) 0.11.x +- [Terraform](https://www.terraform.io/downloads.html) 0.12 - [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 ### Configure a Service Account diff --git a/modules/beta-public-cluster/auth.tf b/modules/beta-public-cluster/auth.tf index 0bbafaf4a2..c177eee5a7 100644 --- a/modules/beta-public-cluster/auth.tf +++ b/modules/beta-public-cluster/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "google-beta" + provider = google-beta } /****************************************** @@ -29,6 +29,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/modules/beta-public-cluster/cluster_regional.tf b/modules/beta-public-cluster/cluster_regional.tf index b651323baf..af21dc605a 100644 --- a/modules/beta-public-cluster/cluster_regional.tf +++ b/modules/beta-public-cluster/cluster_regional.tf @@ -20,74 +20,112 @@ Create regional cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = "google-beta" - count = "${var.regional ? 1 : 0}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta - region = "${var.region}" - node_locations = ["${coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
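# Editor's note: 0.12 turns provider aliases and dependency references into
# bare expressions, which is why quotes are stripped from provider =
# "google-beta" and from depends_on lists throughout this patch. A minimal
# sketch under that assumption:
provider "google-beta" {}

data "google_client_config" "default" {
  provider = google-beta
}

resource "null_resource" "needs_token" {
  # resource references, not strings, in 0.12
  depends_on = [data.google_client_config.default]
}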
1 : 0 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_regional}" + region = var.region - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + node_locations = coalescelist( + compact(var.zones), + sort(random_shuffle.available_zones.result), + ) - enable_binary_authorization = "${var.enable_binary_authorization}" - pod_security_policy_config = "${var.pod_security_policy_config}" - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_regional + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } istio_config { - disabled = "${var.istio ? 0 : 1}" + disabled = ! var.istio } - cloudrun_config = "${local.cluster_cloudrun_config["${var.cloudrun ? 
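# Editor's note: node_locations above is now a bare coalescelist() call;
# compact() drops empty strings from var.zones, and the shuffled zone list
# is only used when no zones were given. Sketch with hypothetical zones:
variable "zones" {
  type    = list(string)
  default = []
}

locals {
  available = ["us-central1-a", "us-central1-c"] # stands in for random_shuffle.available_zones.result

  node_locations = coalescelist(compact(var.zones), sort(local.available))
}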
"enabled" : "disabled"}"]}" + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -98,70 +136,145 @@ resource "google_container_cluster" "primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } } } - remove_default_node_pool = "${var.remove_default_node_pool}" - database_encryption = ["${var.database_encryption}"] + + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } } /****************************************** Create regional node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = "google-beta" - count = "${var.regional ? length(var.node_pools) : 0}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - region = "${var.region}" - cluster = "${google_container_cluster.primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_regional)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? length(var.node_pools) : 0 + name = var.node_pools[count.index]["name"] + project = var.project_id + region = var.region + cluster = google_container_cluster.primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_regional, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", true) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + 
var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } + } - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } } - - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -172,16 +285,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/modules/beta-public-cluster/cluster_zonal.tf b/modules/beta-public-cluster/cluster_zonal.tf index dca12fd9ce..c35f11818e 100644 --- a/modules/beta-public-cluster/cluster_zonal.tf +++ b/modules/beta-public-cluster/cluster_zonal.tf @@ -20,75 +20,107 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "google-beta" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
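# Editor's note: the wait-for-cluster helper above illustrates two 0.12
# changes at once: `when = destroy` is an unquoted keyword, and depends_on
# takes resource references. Reduced sketch with echo placeholders for the
# module's wait-for-cluster.sh:
resource "null_resource" "wait_for_cluster" {
  provisioner "local-exec" {
    command = "echo cluster is ready"
  }

  provisioner "local-exec" {
    when    = destroy
    command = "echo cluster is being destroyed"
  }
}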
0 : 1 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + dynamic "network_policy" { + for_each = local.cluster_network_policy - enable_binary_authorization = "${var.enable_binary_authorization}" - pod_security_policy_config = "${var.pod_security_policy_config}" + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + enable_binary_authorization = var.enable_binary_authorization + + dynamic "pod_security_policy_config" { + for_each = var.pod_security_policy_config + content { + enabled = pod_security_policy_config.value.enabled + } + } + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! var.network_policy } istio_config { - disabled = "${var.istio ? 0 : 1}" + disabled = ! var.istio } - cloudrun_config = "${local.cluster_cloudrun_config["${var.cloudrun ? 
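# Editor's note: with 0.12 a resource that carries count is a list, so the
# single zonal cluster above is addressed as zonal_primary[0].name rather
# than by the bare 0.11 name. Stand-alone sketch with hypothetical
# resources:
variable "regional" {
  type    = bool
  default = false
}

resource "null_resource" "demo_cluster" {
  count = var.regional ? 0 : 1
}

resource "null_resource" "demo_pool" {
  count = var.regional ? 0 : 1

  triggers = {
    # index into the counted resource to reach its attributes
    cluster_id = null_resource.demo_cluster[0].id
  }
}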
"enabled" : "disabled"}"]}" + dynamic "cloudrun_config" { + for_each = local.cluster_cloudrun_config + + content { + disabled = cloudrun_config.value.disabled + } + } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -99,70 +131,146 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) + + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } + } } } - remove_default_node_pool = "${var.remove_default_node_pool}" - database_encryption = ["${var.database_encryption}"] + + remove_default_node_pool = var.remove_default_node_pool + + dynamic "database_encryption" { + for_each = var.database_encryption + + content { + key_name = database_encryption.value.key_name + state = database_encryption.value.state + } + } } /****************************************** Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" + tags = concat( + ["gke-${var.name}"], + ["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + 
var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } + } - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] + dynamic "workload_metadata_config" { + for_each = local.cluster_node_metadata_config - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + content { + node_metadata = workload_metadata_config.value.node_metadata + } } - - workload_metadata_config = "${local.cluster_node_metadata_config["${var.node_metadata == "UNSPECIFIED" ? "unspecified" : "specified"}"]}" } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -173,16 +281,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/modules/beta-public-cluster/dns.tf b/modules/beta-public-cluster/dns.tf index 91b41efac4..7138473ded 100644 --- a/modules/beta-public-cluster/dns.tf +++ b/modules/beta-public-cluster/dns.tf @@ -20,73 +20,94 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config && !local.upstream_nameservers_config ? 
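# Editor's note: the configmap changes above reflect a 0.12 grammar rule:
# map-typed arguments such as labels and data must be assigned with "=",
# while genuine nested blocks like metadata keep block syntax. Sketch:
resource "kubernetes_config_map" "example" {
  metadata {
    name      = "example"
    namespace = "kube-system"

    labels = { # argument, so "=" is now required
      maintained_by = "terraform"
    }
  }

  data = {
    greeting = "hello"
  }
}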
1 : 0}" + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + + cluster_type = var.regional ? "regional" : "zonal" + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] + + cluster_cloudrun_config = var.cloudrun ? [{ disabled = false }] : [] + + cluster_node_metadata_config = var.node_metadata == "UNSPECIFIED" ? [] : [{ + node_metadata = var.node_metadata + }] cluster_type_output_name = { - regional = "${element(concat(google_container_cluster.primary.*.name, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.name, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.name, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.name, [""]), + 0, + ) } cluster_type_output_location = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.zone, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.zone, [""]), + 0, + ) } cluster_type_output_region = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${var.region}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = var.region } - cluster_type_output_regional_zones = "${flatten(google_container_cluster.primary.*.node_locations)}" - cluster_type_output_zonal_zones = "${slice(var.zones, 1, length(var.zones))}" + cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) + cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) cluster_type_output_zones = { - regional = "${local.cluster_type_output_regional_zones}" - zonal = "${concat(google_container_cluster.zonal_primary.*.zone, local.cluster_type_output_zonal_zones)}" + regional = local.cluster_type_output_regional_zones + zonal = concat( + google_container_cluster.zonal_primary.*.zone, + local.cluster_type_output_zonal_zones, + ) } cluster_type_output_endpoint = { - regional = "${element(concat(google_container_cluster.primary.*.endpoint, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.endpoint, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.endpoint, [""]), + 0, + ) } cluster_type_output_master_auth = { - regional = "${concat(google_container_cluster.primary.*.master_auth, list())}" - zonal = "${concat(google_container_cluster.zonal_primary.*.master_auth, list())}" + regional = concat(google_container_cluster.primary.*.master_auth, []) + zonal = concat(google_container_cluster.zonal_primary.*.master_auth, []) } cluster_type_output_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.master_version, list("")), 0)}" + 
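# Editor's note: the regional/zonal output pairs above are selected through
# maps keyed by local.cluster_type, now written without "${...}" wrappers.
# Trimmed sketch with placeholder endpoints instead of the module's guarded
# element(concat(...)) reads:
variable "regional" {
  type    = bool
  default = true
}

locals {
  cluster_type = var.regional ? "regional" : "zonal"

  endpoint_by_type = {
    regional = "203.0.113.10" # placeholder values
    zonal    = "203.0.113.20"
  }

  cluster_endpoint = local.endpoint_by_type[local.cluster_type]
}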
regional = element( + concat(google_container_cluster.primary.*.master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.master_version, + [""], + ), + 0, + ) } cluster_type_output_min_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.min_master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.min_master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.min_master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.min_master_version, + [""], + ), + 0, + ) } cluster_type_output_logging_service = { - regional = "${element(concat(google_container_cluster.primary.*.logging_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.logging_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.logging_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.logging_service, + [""], + ), + 0, + ) } cluster_type_output_monitoring_service = { - regional = "${element(concat(google_container_cluster.primary.*.monitoring_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.monitoring_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.monitoring_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.monitoring_service, + [""], + ), + 0, + ) } cluster_type_output_network_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_http_load_balancing_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_horizontal_pod_autoscaling_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_kubernetes_dashboard_enabled = { - regional = 
"${element(concat(google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) } # BETA features cluster_type_output_istio_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.istio_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.istio_config.0.disabled, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.addons_config.0.istio_config.0.disabled, [""]), 0) + zonal = element(concat(google_container_cluster.zonal_primary.*.addons_config.0.istio_config.0.disabled, [""]), 0) } cluster_type_output_pod_security_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.pod_security_policy_config.0.enabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.pod_security_policy_config.0.enabled, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.pod_security_policy_config.0.enabled, [""]), 0) + zonal = element(concat(google_container_cluster.zonal_primary.*.pod_security_policy_config.0.enabled, [""]), 0) } - # /BETA features cluster_type_output_node_pools_names = { - regional = "${concat(google_container_node_pool.pools.*.name, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.name, list(""))}" + regional = concat(google_container_node_pool.pools.*.name, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.name, [""]) } + cluster_type_output_node_pools_versions = { - regional = "${concat(google_container_node_pool.pools.*.version, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.version, list(""))}" + regional = concat(google_container_node_pool.pools.*.version, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.version, [""]) } - cluster_master_auth_list_layer1 = "${local.cluster_type_output_master_auth[local.cluster_type]}" - cluster_master_auth_list_layer2 = "${local.cluster_master_auth_list_layer1[0]}" - cluster_master_auth_map = "${local.cluster_master_auth_list_layer2[0]}" + + cluster_master_auth_list_layer1 = local.cluster_type_output_master_auth[local.cluster_type] + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] # cluster locals - cluster_name = "${local.cluster_type_output_name[local.cluster_type]}" - cluster_location = "${local.cluster_type_output_location[local.cluster_type]}" - cluster_region = "${local.cluster_type_output_region[local.cluster_type]}" - cluster_zones = "${sort(local.cluster_type_output_zones[local.cluster_type])}" - cluster_endpoint = "${local.cluster_type_output_endpoint[local.cluster_type]}" - cluster_ca_certificate = "${lookup(local.cluster_master_auth_map, "cluster_ca_certificate")}" - cluster_master_version = "${local.cluster_type_output_master_version[local.cluster_type]}" - cluster_min_master_version = "${local.cluster_type_output_min_master_version[local.cluster_type]}" - cluster_logging_service = 
"${local.cluster_type_output_logging_service[local.cluster_type]}" - cluster_monitoring_service = "${local.cluster_type_output_monitoring_service[local.cluster_type]}" - cluster_node_pools_names = "${local.cluster_type_output_node_pools_names[local.cluster_type]}" - cluster_node_pools_versions = "${local.cluster_type_output_node_pools_versions[local.cluster_type]}" - cluster_network_policy_enabled = "${local.cluster_type_output_network_policy_enabled[local.cluster_type] ? false : true}" - cluster_http_load_balancing_enabled = "${local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] ? false : true}" - cluster_horizontal_pod_autoscaling_enabled = "${local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] ? false : true}" - cluster_kubernetes_dashboard_enabled = "${local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] ? false : true}" + cluster_name = local.cluster_type_output_name[local.cluster_type] + cluster_location = local.cluster_type_output_location[local.cluster_type] + cluster_region = local.cluster_type_output_region[local.cluster_type] + cluster_zones = sort(local.cluster_type_output_zones[local.cluster_type]) + cluster_endpoint = local.cluster_type_output_endpoint[local.cluster_type] + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_type_output_master_version[local.cluster_type] + cluster_min_master_version = local.cluster_type_output_min_master_version[local.cluster_type] + cluster_logging_service = local.cluster_type_output_logging_service[local.cluster_type] + cluster_monitoring_service = local.cluster_type_output_monitoring_service[local.cluster_type] + cluster_node_pools_names = local.cluster_type_output_node_pools_names[local.cluster_type] + cluster_node_pools_versions = local.cluster_type_output_node_pools_versions[local.cluster_type] + cluster_network_policy_enabled = ! local.cluster_type_output_network_policy_enabled[local.cluster_type] + cluster_http_load_balancing_enabled = ! local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] + cluster_kubernetes_dashboard_enabled = ! local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] # BETA features - cluster_istio_enabled = "${local.cluster_type_output_istio_enabled[local.cluster_type] ? false : true}" - cluster_cloudrun_enabled = "${var.cloudrun}" - cluster_pod_security_policy_enabled = "${local.cluster_type_output_pod_security_policy_enabled[local.cluster_type] ? true : false}" - + cluster_istio_enabled = ! local.cluster_type_output_istio_enabled[local.cluster_type] + cluster_cloudrun_enabled = var.cloudrun + cluster_pod_security_policy_enabled = local.cluster_type_output_pod_security_policy_enabled[local.cluster_type] # /BETA features } @@ -194,9 +278,9 @@ locals { Get available container engine versions *****************************************/ data "google_container_engine_versions" "region" { - provider = "google-beta" - region = "${var.region}" - project = "${var.project_id}" + provider = google-beta + region = var.region + project = var.project_id } data "google_container_engine_versions" "zone" { @@ -204,7 +288,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. 
// - zone = "${var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0]}" + zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] - project = "${var.project_id}" + project = var.project_id } diff --git a/modules/beta-public-cluster/masq.tf b/modules/beta-public-cluster/masq.tf index 3006578627..1e9dc7791d 100644 --- a/modules/beta-public-cluster/masq.tf +++ b/modules/beta-public-cluster/masq.tf @@ -20,18 +20,18 @@ Create ip-masq-agent confimap *****************************************/ resource "kubernetes_config_map" "ip-masq-agent" { - count = "${var.configure_ip_masq ? 1 : 0}" + count = var.configure_ip_masq ? 1 : 0 metadata { name = "ip-masq-agent" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { config = <
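The `kubernetes_config_map` hunk above also captures 0.12's stricter block-versus-argument distinction: map-typed attributes such as `metadata.labels` and `data` must now be assigned with `=` instead of being written as nested blocks. A minimal self-contained sketch of the corrected shape; the resource name and the ConfigMap payload here are placeholders, not the module's real masquerade config:

```hcl
resource "kubernetes_config_map" "example" {
  metadata {
    name      = "ip-masq-agent"
    namespace = "kube-system"

    # 0.12: `labels` is a map argument, so it takes "=" (0.11 tolerated a bare block).
    labels = {
      maintained_by = "terraform"
    }
  }

  # Likewise, `data` is a map argument rather than a nested block.
  data = {
    config = "nonMasqueradeCIDRs: []"
  }
}
```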
### example format ### master_authorized_networks_config = [{ cidr_blocks = [{ cidr_block = "10.0.0.0/8" display_name = "example_network" }], }] | list | `` | no | -| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | -| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | -| name | The name of the cluster (required) | string | n/a | yes | -| network | The VPC network to host the cluster in (required) | string | n/a | yes | -| network\_policy | Enable network policy addon | string | `"false"` | no | -| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | -| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | -| node\_pools | List of maps containing node pools | list | `` | no | -| node\_pools\_labels | Map of maps containing node labels by node-pool name | map | `` | no | -| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map | `` | no | -| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map | `` | no | -| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map | `` | no | -| node\_pools\_taints | Map of lists containing node taints by node-pool name | map | `` | no | -| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | -| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list | `` | no | -| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | -| region | The region to host the cluster in (required) | string | n/a | yes | -| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | string | `"true"` | no | -| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | string | `"false"` | no | -| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | -| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map | `` | no | -| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | -| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | -| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list | `` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| ca\_certificate | Cluster ca certificate (base64 encoded) | -| endpoint | Cluster endpoint | -| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | -| http\_load\_balancing\_enabled | Whether http load balancing enabled | -| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | -| location | Cluster location (region if regional cluster, zone if zonal cluster) | -| logging\_service | Logging service used | -| master\_authorized\_networks\_config | Networks from which access to master is permitted | -| master\_version | Current master kubernetes version | -| min\_master\_version | Minimum master kubernetes version | -| monitoring\_service | Monitoring service used | -| name | Cluster name | -| network\_policy\_enabled | Whether network policy enabled | -| node\_pools\_names | List of node pools names | -| node\_pools\_versions | List of node pools versions | -| region | Cluster region | -| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | -| type | Cluster type (regional / zonal) | -| zones | List of zones in which the cluster resides | - [^]: (autogen_docs_end) ## Requirements @@ -212,7 +136,7 @@ The [project factory](https://github.com/terraform-google-modules/terraform-goog #### Kubectl - [kubectl](https://github.com/kubernetes/kubernetes/releases) 1.9.x #### Terraform and Plugins -- [Terraform](https://www.terraform.io/downloads.html) 0.11.x +- [Terraform](https://www.terraform.io/downloads.html) 0.12 - [Terraform Provider for GCP Beta][terraform-provider-google-beta] v2.9 ### Configure a Service Account diff --git a/modules/private-cluster/auth.tf b/modules/private-cluster/auth.tf index 0bbafaf4a2..c177eee5a7 100644 --- a/modules/private-cluster/auth.tf +++ b/modules/private-cluster/auth.tf @@ -20,7 +20,7 @@ Retrieve authentication token *****************************************/ data "google_client_config" "default" { - provider = "google-beta" + provider = google-beta } /****************************************** @@ -29,6 +29,6 @@ data "google_client_config" "default" { provider "kubernetes" { load_config_file = false host = "https://${local.cluster_endpoint}" - token = "${data.google_client_config.default.access_token}" - cluster_ca_certificate = "${base64decode(local.cluster_ca_certificate)}" + token = data.google_client_config.default.access_token + cluster_ca_certificate = base64decode(local.cluster_ca_certificate) } diff --git a/modules/private-cluster/cluster_regional.tf b/modules/private-cluster/cluster_regional.tf index 141c9b9445..f42ab76dab 100644 --- a/modules/private-cluster/cluster_regional.tf +++ b/modules/private-cluster/cluster_regional.tf @@ -20,66 +20,91 @@ Create regional cluster *****************************************/ resource "google_container_cluster" "primary" { - provider = "google-beta" - count = "${var.regional ? 
1 : 0}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta - region = "${var.region}" - node_locations = ["${coalescelist(compact(var.zones), sort(random_shuffle.available_zones.result))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 1 : 0 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_regional}" + region = var.region - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + node_locations = coalescelist( + compact(var.zones), + sort(random_shuffle.available_zones.result), + ) - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link + + dynamic "network_policy" { + for_each = local.cluster_network_policy + + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_regional + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! 
var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -90,71 +115,125 @@ resource "google_container_cluster" "primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create regional node pools *****************************************/ resource "google_container_node_pool" "pools" { - provider = "google-beta" - count = "${var.regional ? length(var.node_pools) : 0}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - region = "${var.region}" - cluster = "${google_container_cluster.primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_regional)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? length(var.node_pools) : 0 + name = var.node_pools[count.index]["name"] + project = var.project_id + region = var.region + cluster = google_container_cluster.primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_regional, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", true)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", true) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] - - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + tags = concat( + ["gke-${var.name}"], + 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -165,16 +244,19 @@ resource "google_container_node_pool" "pools" { } resource "null_resource" "wait_for_regional_cluster" { - count = "${var.regional ? 1 : 0}" + count = var.regional ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.primary", "google_container_node_pool.pools"] + depends_on = [ + google_container_cluster.primary, + google_container_node_pool.pools, + ] } diff --git a/modules/private-cluster/cluster_zonal.tf b/modules/private-cluster/cluster_zonal.tf index 1ee89f7e93..9b44c6d478 100644 --- a/modules/private-cluster/cluster_zonal.tf +++ b/modules/private-cluster/cluster_zonal.tf @@ -20,66 +20,86 @@ Create zonal cluster *****************************************/ resource "google_container_cluster" "zonal_primary" { - provider = "google-beta" - count = "${var.regional ? 0 : 1}" - name = "${var.name}" - description = "${var.description}" - project = "${var.project_id}" + provider = google-beta - zone = "${var.zones[0]}" - node_locations = ["${slice(var.zones,1,length(var.zones))}"] - cluster_ipv4_cidr = "${var.cluster_ipv4_cidr}" - network = "${replace(data.google_compute_network.gke_network.self_link, "https://www.googleapis.com/compute/v1/", "")}" - network_policy = "${local.cluster_network_policy["${var.network_policy ? "enabled" : "disabled"}"]}" + count = var.regional ? 
0 : 1 + name = var.name + description = var.description + project = var.project_id - subnetwork = "${replace(data.google_compute_subnetwork.gke_subnetwork.self_link, "https://www.googleapis.com/compute/v1/", "")}" - min_master_version = "${local.kubernetes_version_zonal}" + zone = var.zones[0] + node_locations = slice(var.zones, 1, length(var.zones)) + cluster_ipv4_cidr = var.cluster_ipv4_cidr + network = data.google_compute_network.gke_network.self_link - logging_service = "${var.logging_service}" - monitoring_service = "${var.monitoring_service}" + dynamic "network_policy" { + for_each = local.cluster_network_policy - master_authorized_networks_config = ["${var.master_authorized_networks_config}"] + content { + enabled = network_policy.value.enabled + provider = network_policy.value.provider + } + } + + subnetwork = data.google_compute_subnetwork.gke_subnetwork.self_link + min_master_version = local.kubernetes_version_zonal + + logging_service = var.logging_service + monitoring_service = var.monitoring_service + + dynamic "master_authorized_networks_config" { + for_each = var.master_authorized_networks_config + content { + dynamic "cidr_blocks" { + for_each = master_authorized_networks_config.value.cidr_blocks + content { + cidr_block = lookup(cidr_blocks.value, "cidr_block", "") + display_name = lookup(cidr_blocks.value, "display_name", "") + } + } + } + } master_auth { - username = "${var.basic_auth_username}" - password = "${var.basic_auth_password}" + username = var.basic_auth_username + password = var.basic_auth_password client_certificate_config { - issue_client_certificate = "${var.issue_client_certificate}" + issue_client_certificate = var.issue_client_certificate } } addons_config { http_load_balancing { - disabled = "${var.http_load_balancing ? 0 : 1}" + disabled = ! var.http_load_balancing } horizontal_pod_autoscaling { - disabled = "${var.horizontal_pod_autoscaling ? 0 : 1}" + disabled = ! var.horizontal_pod_autoscaling } kubernetes_dashboard { - disabled = "${var.kubernetes_dashboard ? 0 : 1}" + disabled = ! var.kubernetes_dashboard } network_policy_config { - disabled = "${var.network_policy ? 0 : 1}" + disabled = ! 
var.network_policy } } ip_allocation_policy { - cluster_secondary_range_name = "${var.ip_range_pods}" - services_secondary_range_name = "${var.ip_range_services}" + cluster_secondary_range_name = var.ip_range_pods + services_secondary_range_name = var.ip_range_services } maintenance_policy { daily_maintenance_window { - start_time = "${var.maintenance_start_time}" + start_time = var.maintenance_start_time } } lifecycle { - ignore_changes = ["node_pool"] + ignore_changes = [node_pool] } timeouts { @@ -90,71 +110,126 @@ resource "google_container_cluster" "zonal_primary" { node_pool { name = "default-pool" - initial_node_count = "${var.initial_node_count}" + initial_node_count = var.initial_node_count node_config { - service_account = "${lookup(var.node_pools[0], "service_account", local.service_account)}" + service_account = lookup(var.node_pools[0], "service_account", local.service_account) } } private_cluster_config { - enable_private_endpoint = "${var.enable_private_endpoint}" - enable_private_nodes = "${var.enable_private_nodes}" - master_ipv4_cidr_block = "${var.master_ipv4_cidr_block}" + enable_private_endpoint = var.enable_private_endpoint + enable_private_nodes = var.enable_private_nodes + master_ipv4_cidr_block = var.master_ipv4_cidr_block } - remove_default_node_pool = "${var.remove_default_node_pool}" + remove_default_node_pool = var.remove_default_node_pool } /****************************************** Create zonal node pools *****************************************/ resource "google_container_node_pool" "zonal_pools" { - provider = "google-beta" - count = "${var.regional ? 0 : length(var.node_pools)}" - name = "${lookup(var.node_pools[count.index], "name")}" - project = "${var.project_id}" - zone = "${var.zones[0]}" - cluster = "${google_container_cluster.zonal_primary.name}" - version = "${lookup(var.node_pools[count.index], "auto_upgrade", false) ? "" : lookup(var.node_pools[count.index], "version", local.node_version_zonal)}" - initial_node_count = "${lookup(var.node_pools[count.index], "initial_node_count", lookup(var.node_pools[count.index], "min_count", 1))}" + provider = google-beta + count = var.regional ? 0 : length(var.node_pools) + name = var.node_pools[count.index]["name"] + project = var.project_id + zone = var.zones[0] + cluster = google_container_cluster.zonal_primary[0].name + version = lookup(var.node_pools[count.index], "auto_upgrade", false) ? 
"" : lookup( + var.node_pools[count.index], + "version", + local.node_version_zonal, + ) + initial_node_count = lookup( + var.node_pools[count.index], + "initial_node_count", + lookup(var.node_pools[count.index], "min_count", 1), + ) autoscaling { - min_node_count = "${lookup(var.node_pools[count.index], "min_count", 1)}" - max_node_count = "${lookup(var.node_pools[count.index], "max_count", 100)}" + min_node_count = lookup(var.node_pools[count.index], "min_count", 1) + max_node_count = lookup(var.node_pools[count.index], "max_count", 100) } management { - auto_repair = "${lookup(var.node_pools[count.index], "auto_repair", true)}" - auto_upgrade = "${lookup(var.node_pools[count.index], "auto_upgrade", false)}" + auto_repair = lookup(var.node_pools[count.index], "auto_repair", true) + auto_upgrade = lookup(var.node_pools[count.index], "auto_upgrade", false) } node_config { - image_type = "${lookup(var.node_pools[count.index], "image_type", "COS")}" - machine_type = "${lookup(var.node_pools[count.index], "machine_type", "n1-standard-2")}" - labels = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_labels["all"], var.node_pools_labels[lookup(var.node_pools[count.index], "name")])}" - metadata = "${merge(map("cluster_name", var.name), map("node_pool", lookup(var.node_pools[count.index], "name")), var.node_pools_metadata["all"], var.node_pools_metadata[lookup(var.node_pools[count.index], "name")], map("disable-legacy-endpoints", var.disable_legacy_metadata_endpoints))}" - taint = "${concat(var.node_pools_taints["all"], var.node_pools_taints[lookup(var.node_pools[count.index], "name")])}" - tags = ["${concat(list("gke-${var.name}"), list("gke-${var.name}-${lookup(var.node_pools[count.index], "name")}"), var.node_pools_tags["all"], var.node_pools_tags[lookup(var.node_pools[count.index], "name")])}"] - - disk_size_gb = "${lookup(var.node_pools[count.index], "disk_size_gb", 100)}" - disk_type = "${lookup(var.node_pools[count.index], "disk_type", "pd-standard")}" - service_account = "${lookup(var.node_pools[count.index], "service_account", local.service_account)}" - preemptible = "${lookup(var.node_pools[count.index], "preemptible", false)}" - - oauth_scopes = [ - "${concat(var.node_pools_oauth_scopes["all"], - var.node_pools_oauth_scopes[lookup(var.node_pools[count.index], "name")])}", - ] - - guest_accelerator { - type = "${lookup(var.node_pools[count.index], "accelerator_type", "")}" - count = "${lookup(var.node_pools[count.index], "accelerator_count", 0)}" + image_type = lookup(var.node_pools[count.index], "image_type", "COS") + machine_type = lookup(var.node_pools[count.index], "machine_type", "n1-standard-2") + labels = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_labels["all"], + var.node_pools_labels[var.node_pools[count.index]["name"]], + ) + metadata = merge( + { + "cluster_name" = var.name + }, + { + "node_pool" = var.node_pools[count.index]["name"] + }, + var.node_pools_metadata["all"], + var.node_pools_metadata[var.node_pools[count.index]["name"]], + { + "disable-legacy-endpoints" = var.disable_legacy_metadata_endpoints + }, + ) + dynamic "taint" { + for_each = concat( + var.node_pools_taints["all"], + var.node_pools_taints[var.node_pools[count.index]["name"]], + ) + content { + effect = taint.value.effect + key = taint.value.key + value = taint.value.value + } + } + + tags = concat( + ["gke-${var.name}"], + 
["gke-${var.name}-${var.node_pools[count.index]["name"]}"], + var.node_pools_tags["all"], + var.node_pools_tags[var.node_pools[count.index]["name"]], + ) + + disk_size_gb = lookup(var.node_pools[count.index], "disk_size_gb", 100) + disk_type = lookup(var.node_pools[count.index], "disk_type", "pd-standard") + service_account = lookup( + var.node_pools[count.index], + "service_account", + local.service_account, + ) + preemptible = lookup(var.node_pools[count.index], "preemptible", false) + + oauth_scopes = concat( + var.node_pools_oauth_scopes["all"], + var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], + ) + + dynamic "guest_accelerator" { + for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + type = lookup(var.node_pools[count.index], "accelerator_type", "") + count = lookup(var.node_pools[count.index], "accelerator_count", 0) + }] : [] + content { + type = guest_accelerator.value.type + count = guest_accelerator.value.count + } } } lifecycle { - ignore_changes = ["initial_node_count"] + ignore_changes = [initial_node_count] } timeouts { @@ -165,16 +240,19 @@ resource "google_container_node_pool" "zonal_pools" { } resource "null_resource" "wait_for_zonal_cluster" { - count = "${var.regional ? 0 : 1}" + count = var.regional ? 0 : 1 provisioner "local-exec" { command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } provisioner "local-exec" { - when = "destroy" + when = destroy command = "${path.module}/scripts/wait-for-cluster.sh ${var.project_id} ${var.name}" } - depends_on = ["google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } diff --git a/modules/private-cluster/dns.tf b/modules/private-cluster/dns.tf index 91b41efac4..7138473ded 100644 --- a/modules/private-cluster/dns.tf +++ b/modules/private-cluster/dns.tf @@ -20,73 +20,94 @@ Delete default kube-dns configmap *****************************************/ resource "null_resource" "delete_default_kube_dns_configmap" { - count = "${local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config || local.upstream_nameservers_config ? 1 : 0 provisioner "local-exec" { command = "${path.module}/scripts/kubectl_wrapper.sh https://${local.cluster_endpoint} ${data.google_client_config.default.access_token} ${local.cluster_ca_certificate} ${path.module}/scripts/delete-default-resource.sh kube-system configmap kube-dns" } - depends_on = ["data.google_client_config.default", "google_container_cluster.primary", "google_container_node_pool.pools", "google_container_cluster.zonal_primary", "google_container_node_pool.zonal_pools"] + depends_on = [ + data.google_client_config.default, + google_container_cluster.primary, + google_container_node_pool.pools, + google_container_cluster.zonal_primary, + google_container_node_pool.zonal_pools, + ] } /****************************************** Create kube-dns confimap *****************************************/ resource "kubernetes_config_map" "kube-dns" { - count = "${local.custom_kube_dns_config && !local.upstream_nameservers_config ? 1 : 0}" + count = local.custom_kube_dns_config && ! local.upstream_nameservers_config ? 
1 : 0 metadata { name = "kube-dns" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { stubDomains = < 0 + upstream_nameservers_config = length(var.upstream_nameservers) > 0 + network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + + cluster_type = var.regional ? "regional" : "zonal" + + cluster_network_policy = var.network_policy ? [{ + enabled = true + provider = var.network_policy_provider + }] : [{ + enabled = false + provider = null + }] cluster_type_output_name = { - regional = "${element(concat(google_container_cluster.primary.*.name, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.name, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.name, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.name, [""]), + 0, + ) } cluster_type_output_location = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.zone, list("")), 0)}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = element( + concat(google_container_cluster.zonal_primary.*.zone, [""]), + 0, + ) } cluster_type_output_region = { - regional = "${element(concat(google_container_cluster.primary.*.region, list("")), 0)}" - zonal = "${var.region}" + regional = element(concat(google_container_cluster.primary.*.region, [""]), 0) + zonal = var.region } - cluster_type_output_regional_zones = "${flatten(google_container_cluster.primary.*.node_locations)}" - cluster_type_output_zonal_zones = "${slice(var.zones, 1, length(var.zones))}" + cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) + cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) cluster_type_output_zones = { - regional = "${local.cluster_type_output_regional_zones}" - zonal = "${concat(google_container_cluster.zonal_primary.*.zone, local.cluster_type_output_zonal_zones)}" + regional = local.cluster_type_output_regional_zones + zonal = concat( + google_container_cluster.zonal_primary.*.zone, + local.cluster_type_output_zonal_zones, + ) } cluster_type_output_endpoint = { - regional = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.primary.*.endpoint, list("")), 0) - }" + regional = var.deploy_using_private_endpoint ? element(concat(google_container_cluster.primary.*.private_cluster_config.0.private_endpoint, [""]), 0) : element(concat(google_container_cluster.primary.*.endpoint, [""]), 0) - zonal = "${ - var.deploy_using_private_endpoint ? - element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, list("")), 0) : - element(concat(google_container_cluster.zonal_primary.*.endpoint, list("")), 0) - }" + zonal = var.deploy_using_private_endpoint ? 
element(concat(google_container_cluster.zonal_primary.*.private_cluster_config.0.private_endpoint, [""]), 0) : element(concat(google_container_cluster.zonal_primary.*.endpoint, [""]), 0) } cluster_type_output_master_auth = { - regional = "${concat(google_container_cluster.primary.*.master_auth, list())}" - zonal = "${concat(google_container_cluster.zonal_primary.*.master_auth, list())}" + regional = concat(google_container_cluster.primary.*.master_auth, []) + zonal = concat(google_container_cluster.zonal_primary.*.master_auth, []) } cluster_type_output_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.master_version, + [""], + ), + 0, + ) } cluster_type_output_min_master_version = { - regional = "${element(concat(google_container_cluster.primary.*.min_master_version, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.min_master_version, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.min_master_version, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.min_master_version, + [""], + ), + 0, + ) } cluster_type_output_logging_service = { - regional = "${element(concat(google_container_cluster.primary.*.logging_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.logging_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.logging_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.logging_service, + [""], + ), + 0, + ) } cluster_type_output_monitoring_service = { - regional = "${element(concat(google_container_cluster.primary.*.monitoring_service, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.monitoring_service, list("")), 0)}" + regional = element( + concat(google_container_cluster.primary.*.monitoring_service, [""]), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.monitoring_service, + [""], + ), + 0, + ) } cluster_type_output_network_policy_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.network_policy_config.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_http_load_balancing_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.http_load_balancing.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.http_load_balancing.0.disabled, + 
[""], + ), + 0, + ) } cluster_type_output_horizontal_pod_autoscaling_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.horizontal_pod_autoscaling.0.disabled, + [""], + ), + 0, + ) } cluster_type_output_kubernetes_dashboard_enabled = { - regional = "${element(concat(google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" - zonal = "${element(concat(google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, list("")), 0)}" + regional = element( + concat( + google_container_cluster.primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) + zonal = element( + concat( + google_container_cluster.zonal_primary.*.addons_config.0.kubernetes_dashboard.0.disabled, + [""], + ), + 0, + ) } + cluster_type_output_node_pools_names = { - regional = "${concat(google_container_node_pool.pools.*.name, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.name, list(""))}" + regional = concat(google_container_node_pool.pools.*.name, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.name, [""]) } cluster_type_output_node_pools_versions = { - regional = "${concat(google_container_node_pool.pools.*.version, list(""))}" - zonal = "${concat(google_container_node_pool.zonal_pools.*.version, list(""))}" + regional = concat(google_container_node_pool.pools.*.version, [""]) + zonal = concat(google_container_node_pool.zonal_pools.*.version, [""]) } - cluster_master_auth_list_layer1 = "${local.cluster_type_output_master_auth[local.cluster_type]}" - cluster_master_auth_list_layer2 = "${local.cluster_master_auth_list_layer1[0]}" - cluster_master_auth_map = "${local.cluster_master_auth_list_layer2[0]}" - + cluster_master_auth_list_layer1 = local.cluster_type_output_master_auth[local.cluster_type] + cluster_master_auth_list_layer2 = local.cluster_master_auth_list_layer1[0] + cluster_master_auth_map = local.cluster_master_auth_list_layer2[0] # cluster locals - cluster_name = "${local.cluster_type_output_name[local.cluster_type]}" - cluster_location = "${local.cluster_type_output_location[local.cluster_type]}" - cluster_region = "${local.cluster_type_output_region[local.cluster_type]}" - cluster_zones = "${sort(local.cluster_type_output_zones[local.cluster_type])}" - cluster_endpoint = "${local.cluster_type_output_endpoint[local.cluster_type]}" - cluster_ca_certificate = "${lookup(local.cluster_master_auth_map, "cluster_ca_certificate")}" - cluster_master_version = "${local.cluster_type_output_master_version[local.cluster_type]}" - cluster_min_master_version = "${local.cluster_type_output_min_master_version[local.cluster_type]}" - cluster_logging_service = "${local.cluster_type_output_logging_service[local.cluster_type]}" - cluster_monitoring_service = "${local.cluster_type_output_monitoring_service[local.cluster_type]}" - cluster_node_pools_names = "${local.cluster_type_output_node_pools_names[local.cluster_type]}" - cluster_node_pools_versions = "${local.cluster_type_output_node_pools_versions[local.cluster_type]}" - 
cluster_network_policy_enabled = "${local.cluster_type_output_network_policy_enabled[local.cluster_type] ? false : true}" - cluster_http_load_balancing_enabled = "${local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] ? false : true}" - cluster_horizontal_pod_autoscaling_enabled = "${local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] ? false : true}" - cluster_kubernetes_dashboard_enabled = "${local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] ? false : true}" + cluster_name = local.cluster_type_output_name[local.cluster_type] + cluster_location = local.cluster_type_output_location[local.cluster_type] + cluster_region = local.cluster_type_output_region[local.cluster_type] + cluster_zones = sort(local.cluster_type_output_zones[local.cluster_type]) + cluster_endpoint = local.cluster_type_output_endpoint[local.cluster_type] + cluster_ca_certificate = local.cluster_master_auth_map["cluster_ca_certificate"] + cluster_master_version = local.cluster_type_output_master_version[local.cluster_type] + cluster_min_master_version = local.cluster_type_output_min_master_version[local.cluster_type] + cluster_logging_service = local.cluster_type_output_logging_service[local.cluster_type] + cluster_monitoring_service = local.cluster_type_output_monitoring_service[local.cluster_type] + cluster_node_pools_names = local.cluster_type_output_node_pools_names[local.cluster_type] + cluster_node_pools_versions = local.cluster_type_output_node_pools_versions[local.cluster_type] + cluster_network_policy_enabled = ! local.cluster_type_output_network_policy_enabled[local.cluster_type] + cluster_http_load_balancing_enabled = ! local.cluster_type_output_http_load_balancing_enabled[local.cluster_type] + cluster_horizontal_pod_autoscaling_enabled = ! local.cluster_type_output_horizontal_pod_autoscaling_enabled[local.cluster_type] + cluster_kubernetes_dashboard_enabled = ! local.cluster_type_output_kubernetes_dashboard_enabled[local.cluster_type] } /****************************************** Get available container engine versions *****************************************/ data "google_container_engine_versions" "region" { - provider = "google-beta" - region = "${var.region}" - project = "${var.project_id}" + provider = google-beta + region = var.region + project = var.project_id } data "google_container_engine_versions" "zone" { @@ -189,7 +264,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = "${var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0]}" + zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] - project = "${var.project_id}" + project = var.project_id } diff --git a/modules/private-cluster/masq.tf b/modules/private-cluster/masq.tf index 3006578627..1e9dc7791d 100644 --- a/modules/private-cluster/masq.tf +++ b/modules/private-cluster/masq.tf @@ -20,18 +20,18 @@ Create ip-masq-agent confimap *****************************************/ resource "kubernetes_config_map" "ip-masq-agent" { - count = "${var.configure_ip_masq ? 1 : 0}" + count = var.configure_ip_masq ? 
1 : 0 metadata { name = "ip-masq-agent" namespace = "kube-system" - labels { + labels = { maintained_by = "terraform" } } - data { + data = { config = < Date: Tue, 18 Jun 2019 14:52:17 -0400 Subject: [PATCH 02/16] Fix formatting --- autogen/cluster_regional.tf | 3 +-- cluster_regional.tf | 2 -- modules/beta-private-cluster/cluster_regional.tf | 1 - modules/beta-public-cluster/cluster_regional.tf | 2 -- modules/private-cluster/cluster_regional.tf | 1 - 5 files changed, 1 insertion(+), 8 deletions(-) diff --git a/autogen/cluster_regional.tf b/autogen/cluster_regional.tf index c4354f92ca..96ff41bc5f 100644 --- a/autogen/cluster_regional.tf +++ b/autogen/cluster_regional.tf @@ -32,7 +32,6 @@ resource "google_container_cluster" "primary" { project = var.project_id region = var.region - node_locations = coalescelist( compact(var.zones), sort(random_shuffle.available_zones.result), @@ -167,8 +166,8 @@ resource "google_container_cluster" "primary" { enable_private_nodes = var.enable_private_nodes master_ipv4_cidr_block = var.master_ipv4_cidr_block } -{% endif %} +{% endif %} remove_default_node_pool = var.remove_default_node_pool {% if beta_cluster %} diff --git a/cluster_regional.tf b/cluster_regional.tf index 06798fdd38..b23b25ef08 100644 --- a/cluster_regional.tf +++ b/cluster_regional.tf @@ -28,7 +28,6 @@ resource "google_container_cluster" "primary" { project = var.project_id region = var.region - node_locations = coalescelist( compact(var.zones), sort(random_shuffle.available_zones.result), @@ -122,7 +121,6 @@ resource "google_container_cluster" "primary" { } } - remove_default_node_pool = var.remove_default_node_pool } diff --git a/modules/beta-private-cluster/cluster_regional.tf b/modules/beta-private-cluster/cluster_regional.tf index d26d9d9df9..87eb56365e 100644 --- a/modules/beta-private-cluster/cluster_regional.tf +++ b/modules/beta-private-cluster/cluster_regional.tf @@ -28,7 +28,6 @@ resource "google_container_cluster" "primary" { project = var.project_id region = var.region - node_locations = coalescelist( compact(var.zones), sort(random_shuffle.available_zones.result), diff --git a/modules/beta-public-cluster/cluster_regional.tf b/modules/beta-public-cluster/cluster_regional.tf index af21dc605a..bee29c5c00 100644 --- a/modules/beta-public-cluster/cluster_regional.tf +++ b/modules/beta-public-cluster/cluster_regional.tf @@ -28,7 +28,6 @@ resource "google_container_cluster" "primary" { project = var.project_id region = var.region - node_locations = coalescelist( compact(var.zones), sort(random_shuffle.available_zones.result), @@ -151,7 +150,6 @@ resource "google_container_cluster" "primary" { } } - remove_default_node_pool = var.remove_default_node_pool dynamic "database_encryption" { diff --git a/modules/private-cluster/cluster_regional.tf b/modules/private-cluster/cluster_regional.tf index f42ab76dab..f3ed67b0a4 100644 --- a/modules/private-cluster/cluster_regional.tf +++ b/modules/private-cluster/cluster_regional.tf @@ -28,7 +28,6 @@ resource "google_container_cluster" "primary" { project = var.project_id region = var.region - node_locations = coalescelist( compact(var.zones), sort(random_shuffle.available_zones.result), From 962d4670f6fe1a306d7a28597c5f18df4652895f Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Tue, 18 Jun 2019 15:01:13 -0400 Subject: [PATCH 03/16] Exclude shared fixture from validation --- test/make.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/make.sh b/test/make.sh index b75e42375f..acb25d9afe 100755 --- a/test/make.sh 
From 962d4670f6fe1a306d7a28597c5f18df4652895f Mon Sep 17 00:00:00 2001
From: Aaron Lane
Date: Tue, 18 Jun 2019 15:01:13 -0400
Subject: [PATCH 03/16] Exclude shared fixture from validation

---
 test/make.sh | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/test/make.sh b/test/make.sh
index b75e42375f..acb25d9afe 100755
--- a/test/make.sh
+++ b/test/make.sh
@@ -45,11 +45,12 @@ function docker() {
 # files ending in '.tf'
 function check_terraform() {
   echo "Running terraform validate"
-  #shellcheck disable=SC2156
-  find . -name "*.tf" -not -path "./autogen/*" -not -path "./test/fixtures/shared/*" -not -path "./test/fixtures/all_examples/*" -exec bash -c 'terraform validate --check-variables=false $(dirname "{}")' \;
-  echo "Running terraform fmt"
-  #shellcheck disable=SC2156
-  find . -name "*.tf" -not -path "./autogen/*" -not -path "./test/fixtures/shared/*" -not -path "./test/fixtures/all_examples/*" -exec bash -c 'terraform fmt -check=true -write=false "{}"' \;
+  find . -name "*.tf" \
+    -not -path "./autogen/*" \
+    -not -path "./test/fixtures/all_examples/*" \
+    -not -path "./test/fixtures/shared/*" \
+    -print0 \
+    | xargs -0 dirname | sort | uniq | xargs -L 1 -i{} bash -c 'terraform init "{}" > /dev/null && terraform validate "{}" && terraform fmt -check=true -write=false "{}"'
 }
 
 # This function runs 'go fmt' and 'go vet' on every file

From eb0a0e77bcfd84d3581d76b36b338ecc1ff0a96e Mon Sep 17 00:00:00 2001
From: Ivan Kornienko
Date: Wed, 26 Jun 2019 20:50:11 +0300
Subject: [PATCH 04/16] Fixed doc generator

---
 README.md                                  |  72 ++++++-
 autogen/README.md                          |   4 +-
 autogen/variables.tf                       | 104 +++++-----
 examples/deploy_service/README.md          |   5 +-
 examples/disable_client_cert/README.md     |   5 +-
 examples/node_pool/README.md               |   7 +-
 examples/shared_vpc/README.md              |   5 +-
 examples/simple_regional/README.md         |   5 +-
 examples/simple_regional_private/README.md |   5 +-
 examples/simple_zonal/README.md            |   7 +-
 examples/simple_zonal_private/README.md    |   7 +-
 examples/stub_domains/README.md            |   5 +-
 examples/stub_domains_private/README.md    |   5 +-
 helpers/combine_docfiles.py                |  58 ------
 modules/beta-private-cluster/README.md     |   4 +-
 modules/beta-private-cluster/variables.tf  | 218 ++++++++++-----
 modules/beta-public-cluster/README.md      |   4 +-
 modules/beta-public-cluster/variables.tf   | 202 +++++++++----------
 modules/private-cluster/README.md          |  79 +++++++-
 modules/private-cluster/variables.tf       | 185 +++++++++--------
 test/make.sh                               |  11 +-
 variables.tf                               | 147 +++++++-------
 22 files changed, 611 insertions(+), 533 deletions(-)
 delete mode 100644 helpers/combine_docfiles.py

diff --git a/README.md b/README.md
index ec8cb68ce8..b27cd9f796 100644
--- a/README.md
+++ b/README.md
@@ -113,8 +113,76 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l
 
 In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster.
 
-[^]: (autogen_docs_start)
-[^]: (autogen_docs_end)
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|:----:|:-----:|:-----:|
+| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no |
+| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no |
+| description | The description of the cluster | string | `""` | no |
+| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no |
+| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no |
+| http\_load\_balancing | Enable HTTP load balancer addon | bool | `"true"` | no |
+| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no |
+| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no |
+| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no |
+| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes |
+| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes |
+| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no |
+| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no |
+| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no |
+| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no |
+| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no |
+| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) | object | `` | no |
+| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no |
+| name | The name of the cluster (required) | string | n/a | yes |
+| network | The VPC network to host the cluster in (required) | string | n/a | yes |
+| network\_policy | Enable network policy addon | bool | `"false"` | no |
+| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no |
+| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no |
+| node\_pools | List of maps containing node pools | list(map(string)) | `` | no |
+| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no |
+| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no |
+| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no |
+| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no |
+| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no |
+| node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no |
+| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no |
+| project\_id | The project ID to host the cluster in (required) | string | n/a | yes |
+| region | The region to host the cluster in (required) | string | n/a | yes |
+| regional | Whether this is a regional cluster (zonal cluster if set to false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no |
+| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no |
+| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. | string | `"create"` | no |
+| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no |
+| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes |
+| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| ca\_certificate | Cluster ca certificate (base64 encoded) |
+| endpoint | Cluster endpoint |
+| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled |
+| http\_load\_balancing\_enabled | Whether http load balancing enabled |
+| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled |
+| location | Cluster location (region if regional cluster, zone if zonal cluster) |
+| logging\_service | Logging service used |
+| master\_authorized\_networks\_config | Networks from which access to master is permitted |
+| master\_version | Current master kubernetes version |
+| min\_master\_version | Minimum master kubernetes version |
+| monitoring\_service | Monitoring service used |
+| name | Cluster name |
+| network\_policy\_enabled | Whether network policy enabled |
+| node\_pools\_names | List of node pools names |
+| node\_pools\_versions | List of node pools versions |
+| region | Cluster region |
+| service\_account | The service account to default running nodes as if not overridden in `node_pools`. |
| +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + + ## Requirements diff --git a/autogen/README.md b/autogen/README.md index e66c920a78..0e5d22cfe5 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -127,8 +127,8 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. -[^]: (autogen_docs_start) -[^]: (autogen_docs_end) + + ## Requirements diff --git a/autogen/variables.tf b/autogen/variables.tf index 7e47e7f7bd..d4a30d86e3 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -77,25 +77,6 @@ variable "node_version" { default = "" } -variable "master_authorized_networks_config" { - type = list(object({cidr_blocks=list(object({cidr_block=string,display_name=string}))})) - - description = < ## Inputs | Name | Description | Type | Default | Required | @@ -42,7 +41,7 @@ It will: | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/disable_client_cert/README.md b/examples/disable_client_cert/README.md index 929ba1926e..14dd6545c0 100644 --- a/examples/disable_client_cert/README.md +++ b/examples/disable_client_cert/README.md @@ -5,8 +5,7 @@ This example illustrates how to create a simple cluster and disable deprecated s * basic auth * client certificate -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -41,7 +40,7 @@ This example illustrates how to create a simple cluster and disable deprecated s | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/node_pool/README.md b/examples/node_pool/README.md index 2146e2c53f..9215f091cb 100644 --- a/examples/node_pool/README.md +++ b/examples/node_pool/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a cluster with multiple custom node-pool configurations with node labels, taints, and network tags. 
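Editorial aside, not part of the patch: the node_pool example just described exercises the per-pool inputs documented in the tables above. A minimal sketch of how a consumer would wire them together under the 0.12 interface; the registry source is shown for illustration, and all project, network, and pool values are hypothetical:

```hcl
module "gke" {
  source            = "terraform-google-modules/kubernetes-engine/google"
  project_id        = "my-project-id"
  name              = "node-pool-cluster-demo"
  region            = "us-central1"
  network           = "my-vpc"
  subnetwork        = "my-subnet"
  ip_range_pods     = "my-pods-range"       # name of the secondary range for pods
  ip_range_services = "my-services-range"   # name of the secondary range for services

  node_pools = [
    {
      name         = "pool-01"
      machine_type = "n1-standard-2"
      min_count    = 1
      max_count    = 2
    },
  ]

  # Per-pool maps are keyed by pool name; the "all" key applies to every pool.
  node_pools_labels = {
    all     = {}
    pool-01 = { example-label = "true" }
  }

  node_pools_taints = {
    all     = []
    pool-01 = [{ key = "pool-01-key", value = "true", effect = "PREFER_NO_SCHEDULE" }]
  }

  node_pools_tags = {
    all     = []
    pool-01 = ["pool-01-tag"]
  }
}
```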
-[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -16,7 +15,7 @@ This example illustrates how to create a cluster with multiple custom node-pool | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | -| zones | The zone to host the cluster in (required if is a zonal cluster) | list | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | ## Outputs @@ -37,7 +36,7 @@ This example illustrates how to create a cluster with multiple custom node-pool | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/shared_vpc/README.md b/examples/shared_vpc/README.md index d66cdd5fc4..3b0f5a6157 100644 --- a/examples/shared_vpc/README.md +++ b/examples/shared_vpc/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple cluster where the host network is not necessarily in the same project as the cluster. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -37,7 +36,7 @@ This example illustrates how to create a simple cluster where the host network i | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/simple_regional/README.md b/examples/simple_regional/README.md index 741c40c32b..fb209e47b5 100644 --- a/examples/simple_regional/README.md +++ b/examples/simple_regional/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple cluster. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -36,7 +35,7 @@ This example illustrates how to create a simple cluster. | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/simple_regional_private/README.md b/examples/simple_regional_private/README.md index 9987f9dad0..8175482731 100644 --- a/examples/simple_regional_private/README.md +++ b/examples/simple_regional_private/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple private cluster. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -36,7 +35,7 @@ This example illustrates how to create a simple private cluster. | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/simple_zonal/README.md b/examples/simple_zonal/README.md index 229fde7028..691f95c719 100644 --- a/examples/simple_zonal/README.md +++ b/examples/simple_zonal/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple cluster. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -15,7 +14,7 @@ This example illustrates how to create a simple cluster. 
| project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | -| zones | The zone to host the cluster in (required if is a zonal cluster) | list | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | ## Outputs @@ -36,7 +35,7 @@ This example illustrates how to create a simple cluster. | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/simple_zonal_private/README.md b/examples/simple_zonal_private/README.md index 0e2c56ff7a..e576800d72 100644 --- a/examples/simple_zonal_private/README.md +++ b/examples/simple_zonal_private/README.md @@ -2,8 +2,7 @@ This example illustrates how to create a simple private cluster. -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -16,7 +15,7 @@ This example illustrates how to create a simple private cluster. | project\_id | The project ID to host the cluster in | string | n/a | yes | | region | The region to host the cluster in | string | n/a | yes | | subnetwork | The subnetwork to host the cluster in | string | n/a | yes | -| zones | The zone to host the cluster in (required if is a zonal cluster) | list | n/a | yes | +| zones | The zone to host the cluster in (required if is a zonal cluster) | list(string) | n/a | yes | ## Outputs @@ -37,7 +36,7 @@ This example illustrates how to create a simple private cluster. | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/stub_domains/README.md b/examples/stub_domains/README.md index 4c380ada81..126a1cd54c 100644 --- a/examples/stub_domains/README.md +++ b/examples/stub_domains/README.md @@ -7,8 +7,7 @@ It will: - Remove the default kube-dns configmap - Add a new kube-dns configmap with custom stub domains -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -41,7 +40,7 @@ It will: | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: - `terraform init` to get the plugins diff --git a/examples/stub_domains_private/README.md b/examples/stub_domains_private/README.md index 469ae9f259..ee4b89fa7f 100644 --- a/examples/stub_domains_private/README.md +++ b/examples/stub_domains_private/README.md @@ -9,8 +9,7 @@ It will: - Remove the default kube-dns configmap - Add a new kube-dns configmap with custom stub domains -[^]: (autogen_docs_start) - + ## Inputs | Name | Description | Type | Default | Required | @@ -43,7 +42,7 @@ It will: | subnetwork | | | zones | List of zones in which the cluster resides | -[^]: (autogen_docs_end) + To provision this example, run the following from within this directory: diff --git a/helpers/combine_docfiles.py b/helpers/combine_docfiles.py deleted file mode 100644 index 8f13487efc..0000000000 --- a/helpers/combine_docfiles.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in 
compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' Combine file from: - * script argument 1 - with content of file from: - * script argument 2 - using the beginning of line separators - hardcoded using regexes in this file: - - We exclude any text using the separate - regex specified here -''' - -import os -import re -import sys - -insert_separator_regex = r'(.*?\[\^\]\:\ \(autogen_docs_start\))(.*?)(\n\[\^\]\:\ \(autogen_docs_end\).*?$)' # noqa: E501 -exclude_separator_regex = r'(.*?)Copyright 20\d\d Google LLC.*?limitations under the License.(.*?)$' # noqa: E501 - -if len(sys.argv) != 3: - sys.exit(1) - -if not os.path.isfile(sys.argv[1]): - sys.exit(0) - -input = open(sys.argv[1], "r").read() -replace_content = open(sys.argv[2], "r").read() - -# Exclude the specified content from the replacement content -matched = re.match( - exclude_separator_regex, - replace_content, - re.DOTALL -) - -if matched: - groups = matched.groups(0) - replace_content = groups[0] + groups[1] - - # Find where to put the replacement content, overwrite the input file - groups = re.match(insert_separator_regex, input, re.DOTALL).groups(0) - output = groups[0] + replace_content + groups[2] + "\n" - open(sys.argv[1], "w").write(output) diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index af6b19b1f7..f113d12cd5 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -120,8 +120,8 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. -[^]: (autogen_docs_start) -[^]: (autogen_docs_end) + + ## Requirements diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index 05e380de6f..cad96210d3 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -77,91 +77,72 @@ variable "node_version" { default = "" } -variable "master_authorized_networks_config" { - type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) - - description = < + ## Requirements diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 5f58962d88..1bacd15ffb 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -77,91 +77,72 @@ variable "node_version" { default = "" } -variable "master_authorized_networks_config" { - type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) - - description = < +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. 
| string | `""` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. 
VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. | string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | object | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + + ## Requirements diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index 5b86da9e6e..da768662a2 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -77,91 +77,72 @@ variable "node_version" { default = "" } -variable "master_authorized_networks_config" { - type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) - - description = <"$TMPFILE" - python helpers/combine_docfiles.py "$j"/README.md "$TMPFILE" + if [[ -e "${j}/README.md" ]]; then + # script seem to be designed to work into current directory + cd $j && echo "Working in ${j} ..." + terraform_docs.sh . && echo Success! || echo "Warning! Exit code: ${?}" + cd - >/dev/null + else + echo "Skipping ${j} because README.md does not exist." 
+ fi done - rm -f "$TMPFILE" } function check_generate() { diff --git a/variables.tf b/variables.tf index 8252ecb1aa..1100d5c101 100644 --- a/variables.tf +++ b/variables.tf @@ -77,91 +77,72 @@ variable "node_version" { default = "" } -variable "master_authorized_networks_config" { - type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) - - description = < Date: Tue, 9 Jul 2019 15:07:03 -0400 Subject: [PATCH 05/16] Remove heredocs from variable descriptions Heredocs break terraform_docs.sh --- autogen/variables.tf | 49 +++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 33 deletions(-) diff --git a/autogen/variables.tf b/autogen/variables.tf index d4a30d86e3..ef0bea9060 100644 --- a/autogen/variables.tf +++ b/autogen/variables.tf @@ -77,6 +77,12 @@ variable "node_version" { default = "" } +variable "master_authorized_networks_config" { + type = list(object({cidr_blocks = list(object({cidr_block = string, display_name = string}))})) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + variable "horizontal_pod_autoscaling" { type = bool description = "Enable horizontal pod autoscaling addon" @@ -277,6 +283,7 @@ variable "cluster_ipv4_cidr" { default = "" description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." } + {% if private_cluster %} variable "deploy_using_private_endpoint" { @@ -310,6 +317,15 @@ variable "istio" { default = false } +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." + type = list(object({state = string, key_name = string})) + default = [{ + state = "DECRYPTED" + key_name = "" + }] +} + variable "cloudrun" { description = "(Beta) Enable CloudRun addon" default = false @@ -331,37 +347,4 @@ variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" default = "UNSPECIFIED" } - -variable "database_encryption" { - description = <<-EOF - Application-layer Secrets Encryption settings. Example: - database_encryption = [{ - state = "ENCRYPTED", - key_name = "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key" - }] - EOF - type = "list" - default = [{ - state = "DECRYPTED" - key_name = "" - }] -} {% endif %} - -variable "master_authorized_networks_config" { - type = list(object({cidr_blocks = list(object({cidr_block = string, display_name = string}))})) - - description = <<-EOF - The desired configuration options for master authorized networks. 
Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) - - ### example format ### - master_authorized_networks_config = [{ - cidr_blocks = [{ - cidr_block = "10.0.0.0/8" - display_name = "example_network" - }], - }] - EOF - - default = [] -} From b88212fcb114e22361909710e5183766d346c4d9 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Tue, 9 Jul 2019 15:50:10 -0400 Subject: [PATCH 06/16] Regenerate modules --- README.md | 5 +- modules/beta-private-cluster/README.md | 84 +++++++++++++++++++++++ modules/beta-private-cluster/variables.tf | 49 +++++-------- modules/beta-public-cluster/README.md | 80 +++++++++++++++++++++ modules/beta-public-cluster/variables.tf | 49 +++++-------- modules/private-cluster/README.md | 8 +-- modules/private-cluster/variables.tf | 25 ++----- variables.tf | 23 ++----- 8 files changed, 217 insertions(+), 106 deletions(-) diff --git a/README.md b/README.md index b27cd9f796..5dd2adccbf 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,8 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o |------|-------------|:----:|:-----:|:-----:| | basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | | description | The description of the cluster | string | `""` | no | | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | @@ -134,7 +136,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | -| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) | object | `` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). 
| object | `` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | | name | The name of the cluster (required) | string | n/a | yes | | network | The VPC network to host the cluster in (required) | string | n/a | yes | @@ -156,6 +158,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. | string | `"create"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | ## Outputs diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index f113d12cd5..495cd14289 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -121,6 +121,90 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. 
| bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no | +| enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. | number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| istio\_enabled | Whether Istio is enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + ## Requirements diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index cad96210d3..04d202cd46 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -77,6 +77,12 @@ variable "node_version" { default = "" } +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + variable "horizontal_pod_autoscaling" { type = bool description = "Enable horizontal pod autoscaling addon" @@ -278,6 +284,7 @@ variable "cluster_ipv4_cidr" { description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." } + variable "deploy_using_private_endpoint" { type = bool description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." @@ -307,6 +314,15 @@ variable "istio" { default = false } +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." 
+ type = list(object({ state = string, key_name = string })) + default = [{ + state = "DECRYPTED" + key_name = "" + }] +} + variable "cloudrun" { description = "(Beta) Enable CloudRun addon" default = false @@ -328,36 +344,3 @@ variable "node_metadata" { description = "Specifies how node metadata is exposed to the workload running on the node" default = "UNSPECIFIED" } - -variable "database_encryption" { - description = <<-EOF - Application-layer Secrets Encryption settings. Example: - database_encryption = [{ - state = "ENCRYPTED", - key_name = "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key" - }] - EOF - type = "list" - default = [{ - state = "DECRYPTED" - key_name = "" - }] -} - -variable "master_authorized_networks_config" { - type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) - - description = <<-EOF - The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) - - ### example format ### - master_authorized_networks_config = [{ - cidr_blocks = [{ - cidr_block = "10.0.0.0/8" - display_name = "example_network" - }], - }] - EOF - - default = [] -} diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 6dfc1361fb..915e779c88 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -116,6 +116,86 @@ Version 1.0.0 of this module introduces a breaking change: adding the `disable-l In either case, upgrading to module version `v1.0.0` will trigger a recreation of all node pools in the cluster. +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|:----:|:-----:|:-----:| +| basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no | +| basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no | +| cloudrun | (Beta) Enable CloudRun addon | string | `"false"` | no | +| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no | +| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliasied IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no | +| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key. | object | `` | no | +| description | The description of the cluster | string | `""` | no | +| disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no | +| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no | +| horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no | +| http\_load\_balancing | Enable httpload balancer addon | bool | `"true"` | no | +| initial\_node\_count | The number of nodes to create in this cluster's default node pool. 
| number | `"0"` | no | +| ip\_masq\_link\_local | Whether to masquerade traffic to the link-local prefix (169.254.0.0/16). | bool | `"false"` | no | +| ip\_masq\_resync\_interval | The interval at which the agent attempts to sync its ConfigMap file from the disk. | string | `"60s"` | no | +| ip\_range\_pods | The _name_ of the secondary subnet ip range to use for pods | string | n/a | yes | +| ip\_range\_services | The _name_ of the secondary subnet range to use for services | string | n/a | yes | +| issue\_client\_certificate | Issues a client certificate to authenticate to the cluster endpoint. To maximize the security of your cluster, leave this option disabled. Client certificates don't automatically rotate and aren't easily revocable. WARNING: changing this after cluster creation is destructive! | bool | `"false"` | no | +| istio | (Beta) Enable Istio addon | string | `"false"` | no | +| kubernetes\_dashboard | Enable kubernetes dashboard addon | bool | `"false"` | no | +| kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull latest available version in the selected region. | string | `"latest"` | no | +| logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no | +| maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no | +| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no | +| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | string | `"monitoring.googleapis.com"` | no | +| name | The name of the cluster (required) | string | n/a | yes | +| network | The VPC network to host the cluster in (required) | string | n/a | yes | +| network\_policy | Enable network policy addon | bool | `"false"` | no | +| network\_policy\_provider | The network policy provider. 
| string | `"CALICO"` | no | +| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | string | `""` | no | +| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | string | `"UNSPECIFIED"` | no | +| node\_pools | List of maps containing node pools | list(map(string)) | `` | no | +| node\_pools\_labels | Map of maps containing node labels by node-pool name | map(map(string)) | `` | no | +| node\_pools\_metadata | Map of maps containing node metadata by node-pool name | map(map(string)) | `` | no | +| node\_pools\_oauth\_scopes | Map of lists containing node oauth scopes by node-pool name | map(list(string)) | `` | no | +| node\_pools\_tags | Map of lists containing node network tags by node-pool name | map(list(string)) | `` | no | +| node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no | +| node\_version | The Kubernetes version of the node pools. Defaults kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empyty or set the same as master at cluster creation. | string | `""` | no | +| non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no | +| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | list | `` | no | +| project\_id | The project ID to host the cluster in (required) | string | n/a | yes | +| region | The region to host the cluster in (required) | string | n/a | yes | +| regional | Whether is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no | +| remove\_default\_node\_pool | Remove default node pool while setting up the cluster | bool | `"false"` | no | +| service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created. 
| string | `"create"` | no | +| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | +| subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | +| zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| ca\_certificate | Cluster ca certificate (base64 encoded) | +| cloudrun\_enabled | Whether CloudRun enabled | +| endpoint | Cluster endpoint | +| horizontal\_pod\_autoscaling\_enabled | Whether horizontal pod autoscaling enabled | +| http\_load\_balancing\_enabled | Whether http load balancing enabled | +| istio\_enabled | Whether Istio is enabled | +| kubernetes\_dashboard\_enabled | Whether kubernetes dashboard enabled | +| location | Cluster location (region if regional cluster, zone if zonal cluster) | +| logging\_service | Logging service used | +| master\_authorized\_networks\_config | Networks from which access to master is permitted | +| master\_version | Current master kubernetes version | +| min\_master\_version | Minimum master kubernetes version | +| monitoring\_service | Monitoring service used | +| name | Cluster name | +| network\_policy\_enabled | Whether network policy enabled | +| node\_pools\_names | List of node pools names | +| node\_pools\_versions | List of node pools versions | +| pod\_security\_policy\_enabled | Whether pod security policy is enabled | +| region | Cluster region | +| service\_account | The service account to default running nodes as if not overridden in `node_pools`. | +| type | Cluster type (regional / zonal) | +| zones | List of zones in which the cluster resides | + ## Requirements diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf index 1bacd15ffb..4de786246c 100644 --- a/modules/beta-public-cluster/variables.tf +++ b/modules/beta-public-cluster/variables.tf @@ -77,6 +77,12 @@ variable "node_version" { default = "" } +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + variable "horizontal_pod_autoscaling" { type = bool description = "Enable horizontal pod autoscaling addon" @@ -278,11 +284,21 @@ variable "cluster_ipv4_cidr" { description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." } + variable "istio" { description = "(Beta) Enable Istio addon" default = false } +variable "database_encryption" { + description = "Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: \"ENCRYPTED\"; \"DECRYPTED\". key_name is the name of a CloudKMS key." 
+  type        = list(object({ state = string, key_name = string }))
+  default = [{
+    state    = "DECRYPTED"
+    key_name = ""
+  }]
+}
+
 variable "cloudrun" {
   description = "(Beta) Enable CloudRun addon"
   default     = false
@@ -304,36 +320,3 @@ variable "node_metadata" {
   description = "Specifies how node metadata is exposed to the workload running on the node"
   default     = "UNSPECIFIED"
 }
-
-variable "database_encryption" {
-  description = <<-EOF
-  Application-layer Secrets Encryption settings. Example:
-  database_encryption = [{
-    state = "ENCRYPTED",
-    key_name = "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"
-  }]
-  EOF
-  type = "list"
-  default = [{
-    state    = "DECRYPTED"
-    key_name = ""
-  }]
-}
-
-variable "master_authorized_networks_config" {
-  type        = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) }))
-
-  description = <<-EOF
-  The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)
-
-  ### example format ###
-  master_authorized_networks_config = [{
-    cidr_blocks = [{
-      cidr_block   = "10.0.0.0/8"
-      display_name = "example_network"
-    }],
-  }]
-  EOF
-
-  default = []
-}
diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md
index 2e15f66398..ddf24b6d4d 100644
--- a/modules/private-cluster/README.md
+++ b/modules/private-cluster/README.md
@@ -125,10 +125,11 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o
 |------|-------------|:----:|:-----:|:-----:|
 | basic\_auth\_password | The password to be used with Basic Authentication. | string | `""` | no |
 | basic\_auth\_username | The username to be used with Basic Authentication. An empty value will disable Basic Authentication, which is the recommended configuration. | string | `""` | no |
+| cluster\_ipv4\_cidr | The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR. | string | `""` | no |
+| configure\_ip\_masq | Enables the installation of ip masquerading, which is usually no longer required when using aliased IP addresses. IP masquerading uses a kubectl call, so when you have a private cluster, you will need access to the API server. | string | `"false"` | no |
 | deploy\_using\_private\_endpoint | (Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment. | bool | `"false"` | no |
 | description | The description of the cluster | string | `""` | no |
 | disable\_legacy\_metadata\_endpoints | Disable the /0.1/ and /v1beta1/ metadata server endpoints on the node. Changing this value will cause all node pools to be recreated. | bool | `"true"` | no |
-| enable\_binary\_authorization | Enable BinAuthZ Admission controller | string | `"false"` | no |
 | enable\_private\_endpoint | (Beta) Whether the master's internal IP address is used as the cluster endpoint | bool | `"false"` | no |
 | enable\_private\_nodes | (Beta) Whether nodes have internal IP addresses only | bool | `"false"` | no |
 | horizontal\_pod\_autoscaling | Enable horizontal pod autoscaling addon | bool | `"true"` | no |
@@ -143,7 +144,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o
 | kubernetes\_version | The Kubernetes version of the masters. If set to 'latest' it will pull the latest available version in the selected region. | string | `"latest"` | no |
 | logging\_service | The logging service that the cluster should write logs to. Available options include logging.googleapis.com, logging.googleapis.com/kubernetes (beta), and none | string | `"logging.googleapis.com"` | no |
 | maintenance\_start\_time | Time window specified for daily maintenance operations in RFC3339 format | string | `"05:00"` | no |
-| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) | object | `` | no |
+| master\_authorized\_networks\_config | The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists). | object | `` | no |
 | master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | string | `"10.0.0.0/28"` | no |
 | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting. Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta), and none | string | `"monitoring.googleapis.com"` | no |
 | name | The name of the cluster (required) | string | n/a | yes |
@@ -159,7 +160,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o
 | node\_pools\_taints | Map of lists containing node taints by node-pool name | object | `` | no |
 | node\_version | The Kubernetes version of the node pools. Defaults to the kubernetes_version (master) variable and can be overridden for individual node pools by setting the `version` key on them. Must be empty or set the same as master at cluster creation. | string | `""` | no |
 | non\_masquerade\_cidrs | List of strings in CIDR notation that specify the IP address ranges that do not use IP masquerading. | list(string) | `` | no |
-| pod\_security\_policy\_config | enabled - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. | object | `` | no |
 | project\_id | The project ID to host the cluster in (required) | string | n/a | yes |
 | region | The region to host the cluster in (required) | string | n/a | yes |
 | regional | Whether this is a regional cluster (zonal cluster if set false. WARNING: changing this after cluster creation is destructive!) | bool | `"true"` | no |
@@ -167,6 +167,7 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o
 | service\_account | The service account to run nodes as if not overridden in `node_pools`. The default value will cause a cluster-specific service account to be created.
| string | `"create"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | map(list(string)) | `` | no | | subnetwork | The subnetwork to host the cluster in (required) | string | n/a | yes | +| upstream\_nameservers | If specified, the values replace the nameservers taken by default from the node’s /etc/resolv.conf | list | `` | no | | zones | The zones to host the cluster in (optional if regional cluster / required if zonal) | list(string) | `` | no | ## Outputs @@ -188,7 +189,6 @@ In either case, upgrading to module version `v1.0.0` will trigger a recreation o | network\_policy\_enabled | Whether network policy enabled | | node\_pools\_names | List of node pools names | | node\_pools\_versions | List of node pools versions | -| pod\_security\_policy\_enabled | Whether pod security policy is enabled | | region | Cluster region | | service\_account | The service account to default running nodes as if not overridden in `node_pools`. | | type | Cluster type (regional / zonal) | diff --git a/modules/private-cluster/variables.tf b/modules/private-cluster/variables.tf index da768662a2..4491851740 100644 --- a/modules/private-cluster/variables.tf +++ b/modules/private-cluster/variables.tf @@ -77,6 +77,12 @@ variable "node_version" { default = "" } +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)." + default = [] +} + variable "horizontal_pod_autoscaling" { type = bool description = "Enable horizontal pod autoscaling addon" @@ -278,6 +284,7 @@ variable "cluster_ipv4_cidr" { description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR." } + variable "deploy_using_private_endpoint" { type = bool description = "(Beta) A toggle for Terraform and kubectl to connect to the master's internal IP address during deployment." @@ -301,21 +308,3 @@ variable "master_ipv4_cidr_block" { description = "(Beta) The IP range in CIDR notation to use for the hosted master network" default = "10.0.0.0/28" } - -variable "master_authorized_networks_config" { - type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) - - description = <<-EOF - The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists) - - ### example format ### - master_authorized_networks_config = [{ - cidr_blocks = [{ - cidr_block = "10.0.0.0/8" - display_name = "example_network" - }], - }] - EOF - - default = [] -} diff --git a/variables.tf b/variables.tf index 1100d5c101..beb05ea126 100644 --- a/variables.tf +++ b/variables.tf @@ -77,6 +77,12 @@ variable "node_version" { default = "" } +variable "master_authorized_networks_config" { + type = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) })) + description = "The desired configuration options for master authorized networks. The object format is {cidr_blocks = list(object({cidr_block = string, display_name = string}))}. 
Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)."
+  default = []
+}
+
 variable "horizontal_pod_autoscaling" {
   type        = bool
   description = "Enable horizontal pod autoscaling addon"
@@ -278,20 +284,3 @@ variable "cluster_ipv4_cidr" {
   description = "The IP address range of the kubernetes pods in this cluster. Default is an automatically assigned CIDR."
 }
 
-variable "master_authorized_networks_config" {
-  type        = list(object({ cidr_blocks = list(object({ cidr_block = string, display_name = string })) }))
-
-  description = <<-EOF
-  The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists)
-
-  ### example format ###
-  master_authorized_networks_config = [{
-    cidr_blocks = [{
-      cidr_block   = "10.0.0.0/8"
-      display_name = "example_network"
-    }],
-  }]
-  EOF
-
-  default = []
-}
From d46de55a4f896ed6ca9ad4450c15c268b40d1acb Mon Sep 17 00:00:00 2001
From: Aaron Lane
Date: Tue, 9 Jul 2019 15:52:18 -0400
Subject: [PATCH 07/16] [skip ci] Fix rebase error in CHANGELOG

---
 CHANGELOG.md | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f0948c4ef1..cf1dfffe06 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,7 +8,12 @@ Extending the adopted spec, each change should have a link to its corresponding

 ## [Unreleased]

-<<<<<<< HEAD
+## [4.0.0] 2019-07-ZZ
+
+### Changed
+
+* Supported version of Terraform is 0.12. [#58]
+
 ## [v3.0.0] - 2019-07-08

 ### Added

 * Add configuration flag for enable BinAuthZ Admission controller [#160] [#188]
 * Add configuration flag for `pod_security_policy_config` [#163] [#188]
 * Support for a guest accelerator in node pool configuration. [#197]
-=======
-## [2.0.0] 2019-06-ZZ
-
-### Changed
-
-* Supported version of Terraform is 0.12. [#58]
-* Add configuration flag for enable BinAuthZ Admission controller [#160]
-* Add configuration flag for `pod_security_policy_config` [#163]
-* Support for a guest accelerator in node pool configuration. [#157]
->>>>>>> Fixes #158: Add support for Terraform v0.12
 * Support to scale the default node cluster. [#149]
 * Support for configuring the network policy provider. [#159]
 * Support for database encryption. [#165]
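The patches above replace the heredoc variable descriptions (which carried inline usage examples) with one-line descriptions plus first-class `list(object({...}))` types. Since the inline examples are gone, here is a minimal caller-side sketch of both formats; the module source, CIDR range, and CloudKMS key name are hypothetical placeholders, not values from this patch series:

```hcl
module "gke" {
  source = "terraform-google-modules/kubernetes-engine/google//modules/beta-public-cluster"

  # ... required inputs (project_id, name, network, subnetwork, ...) omitted ...

  # list(object): leave at the default [] to disallow external access entirely.
  master_authorized_networks_config = [{
    cidr_blocks = [{
      cidr_block   = "10.0.0.0/8" # hypothetical internal range
      display_name = "example_network"
    }]
  }]

  # list(object): state is "ENCRYPTED" or "DECRYPTED"; key_name names a CloudKMS key.
  database_encryption = [{
    state    = "ENCRYPTED"
    key_name = "projects/example-project/locations/global/keyRings/example-ring/cryptoKeys/example-key"
  }]
}
```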
From 0b47afa2e179a4cf83e0526dc033b82bb19b54f6 Mon Sep 17 00:00:00 2001
From: Jason Berlinsky
Date: Tue, 9 Jul 2019 22:45:01 -0400
Subject: [PATCH 08/16] Simplify test process by removing intermediary image
 build step

---
 Makefile                                  | 29 ++++++-----------------
 build/docker/kitchen_terraform/Dockerfile | 29 -----------------------
 2 files changed, 7 insertions(+), 51 deletions(-)
 delete mode 100644 build/docker/kitchen_terraform/Dockerfile

diff --git a/Makefile b/Makefile
index 27ecbaf396..265a529d9c 100644
--- a/Makefile
+++ b/Makefile
@@ -18,10 +18,8 @@ SHELL := /usr/bin/env bash
 # Docker build config variables
 CREDENTIALS_PATH ?= /cft/workdir/credentials.json
 DOCKER_ORG := gcr.io/cloud-foundation-cicd
-DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.0.0
+DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.0.1
 DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM}
-DOCKER_TAG_KITCHEN_TERRAFORM ?= ${DOCKER_TAG_BASE_KITCHEN_TERRAFORM}
-DOCKER_IMAGE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform_terraform-google-kubernetes-engine

 # All is the first target in the file so it will get picked up when you just run 'make' on its own
 .PHONY: all
@@ -105,19 +103,6 @@ dev: generate generate_docs
 version:
 	@source helpers/version-repo.sh

-# Build Docker
-.PHONY: docker_build_kitchen_terraform
-docker_build_kitchen_terraform:
-	docker build -f build/docker/kitchen_terraform/Dockerfile \
-		--build-arg BASE_IMAGE=${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \
-		-t ${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} .
-
-# Push Docker image
-.PHONY: docker_push_kitchen_terraform
-docker_push_kitchen_terraform:
-	docker tag ${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} ${DOCKER_ORG}/${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM}
-	docker push ${DOCKER_ORG}/${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM}
-
 # Run docker
 .PHONY: docker_run
 docker_run:
@@ -130,7 +115,7 @@ docker_run:
 	-e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \
 	-e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \
 	-v "$(CURDIR)":/cft/workdir \
-	${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} \
+	${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \
 	/bin/bash -c "source test/ci_integration.sh && setup_environment && exec /bin/bash"

 .PHONY: docker_create
@@ -144,7 +129,7 @@ docker_create: docker_build_kitchen_terraform
 	-e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \
 	-e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \
 	-v "$(CURDIR)":/cft/workdir \
-	${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} \
+	${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \
 	/bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen create"

 .PHONY: docker_converge
@@ -158,7 +143,7 @@ docker_converge:
 	-e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \
 	-e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \
 	-v "$(CURDIR)":/cft/workdir \
-	${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} \
+	${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \
 	/bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen converge && kitchen converge"

 .PHONY: docker_verify
@@ -172,7 +157,7 @@ docker_verify:
 	-e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \
 	-e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \
 	-v "$(CURDIR)":/cft/workdir \
-	${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} \
+	${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \
 	/bin/bash -c
"source test/ci_integration.sh && setup_environment && kitchen verify" .PHONY: docker_destroy @@ -186,7 +171,7 @@ docker_destroy: -e CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE=${CREDENTIALS_PATH} \ -e GOOGLE_APPLICATION_CREDENTIALS=${CREDENTIALS_PATH} \ -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} \ + ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ /bin/bash -c "source test/ci_integration.sh && setup_environment && kitchen destroy" .PHONY: test_integration_docker @@ -198,5 +183,5 @@ test_integration_docker: -e ZONES \ -e SERVICE_ACCOUNT_JSON \ -v "$(CURDIR)":/cft/workdir \ - ${DOCKER_IMAGE_KITCHEN_TERRAFORM}:${DOCKER_TAG_KITCHEN_TERRAFORM} \ + ${DOCKER_REPO_BASE_KITCHEN_TERRAFORM} \ /bin/bash -c "test/ci_integration.sh" diff --git a/build/docker/kitchen_terraform/Dockerfile b/build/docker/kitchen_terraform/Dockerfile deleted file mode 100644 index 1cbddf7e7a..0000000000 --- a/build/docker/kitchen_terraform/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG BASE_IMAGE - -# hadolint ignore=DL3006 -FROM $BASE_IMAGE - -RUN apk add --no-cache \ - ca-certificates=20190108-r0 - -ADD https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/linux/amd64/kubectl /usr/local/bin/kubectl -RUN chmod +x /usr/local/bin/kubectl - -WORKDIR /opt/kitchen -COPY Gemfile . -RUN bundle install -WORKDIR $APP_BASE_DIR/workdir From e91e759c7be862e768673620fdbd9ae56f103652 Mon Sep 17 00:00:00 2001 From: Jason Berlinsky Date: Wed, 10 Jul 2019 10:27:28 -0400 Subject: [PATCH 09/16] Fix kitchen-terraform image version Co-Authored-By: Aaron Lane --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 265a529d9c..5c184a3d36 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ SHELL := /usr/bin/env bash # Docker build config variables CREDENTIALS_PATH ?= /cft/workdir/credentials.json DOCKER_ORG := gcr.io/cloud-foundation-cicd -DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.0.1 +DOCKER_TAG_BASE_KITCHEN_TERRAFORM ?= 2.1.0 DOCKER_REPO_BASE_KITCHEN_TERRAFORM := ${DOCKER_ORG}/cft/kitchen-terraform:${DOCKER_TAG_BASE_KITCHEN_TERRAFORM} # All is the first target in the file so it will get picked up when you just run 'make' on its own From f05ac55cc66b7889b0a3ccfba2cb506bcfb9b948 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Wed, 10 Jul 2019 10:57:13 -0400 Subject: [PATCH 10/16] Update make.sh from the module template --- test/make.sh | 134 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 96 insertions(+), 38 deletions(-) diff --git a/test/make.sh b/test/make.sh index c4464206c4..428960e919 100755 --- a/test/make.sh +++ b/test/make.sh @@ -14,76 +14,114 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This function checks to make sure that every -# shebang has a '- e' flag, which causes it -# to exit on error -function check_bash() { - find . 
-name "*.sh" | while IFS= read -d '' -r file; do - if [[ "$file" != *"bash -e"* ]]; then - echo "$file is missing shebang with -e" - exit 1 - fi - done +# Please note that this file was generated from [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template). +# Please make sure to contribute relevant changes upstream! + +# Create a temporary directory that's auto-cleaned, even if the process aborts. +DELETE_AT_EXIT="$(mktemp -d)" +finish() { + [[ -d "${DELETE_AT_EXIT}" ]] && rm -rf "${DELETE_AT_EXIT}" +} +trap finish EXIT +# Create a temporary file in the auto-cleaned up directory while avoiding +# overwriting TMPDIR for other processes. +# shellcheck disable=SC2120 # (Arguments may be passed, e.g. maketemp -d) +maketemp() { + TMPDIR="${DELETE_AT_EXIT}" mktemp "$@" +} + +# find_files is a helper to exclude .git directories and match only regular +# files to avoid double-processing symlinks. +find_files() { + local pth="$1" + shift + find "${pth}" '(' \ + -path '*/.git' -o \ + -path '*/.terraform' -o \ + -path './autogen' -o \ + -path './test/fixtures/all_examples' -o \ + -path './test/fixtures/shared' ')' \ + -prune -o -type f "$@" +} + +# Compatibility with both GNU and BSD style xargs. +compat_xargs() { + local compat=() + # Test if xargs is GNU or BSD style. GNU xargs will succeed with status 0 + # when given --no-run-if-empty and no input on STDIN. BSD xargs will fail and + # exit status non-zero If xargs fails, assume it is BSD style and proceed. + # stderr is silently redirected to avoid console log spam. + if xargs --no-run-if-empty /dev/null; then + compat=("--no-run-if-empty") + fi + xargs "${compat[@]}" "$@" } # This function makes sure that the required files for # releasing to OSS are present function basefiles() { - echo "Checking for required files" - test -f LICENSE || echo "Missing LICENSE" - test -f README.md || echo "Missing README.md" + local fn required_files="LICENSE README.md" + echo "Checking for required files ${required_files}" + for fn in ${required_files}; do + test -f "${fn}" || echo "Missing required file ${fn}" + done } # This function runs the hadolint linter on # every file named 'Dockerfile' function docker() { echo "Running hadolint on Dockerfiles" - find . -name "Dockerfile" -exec hadolint {} \; + find_files . -name "Dockerfile" -print0 \ + | compat_xargs -0 hadolint } -# This function runs 'terraform validate' against all -# files ending in '.tf' +# This function runs 'terraform validate' and 'terraform fmt' +# against all directory paths which contain *.tf files. function check_terraform() { - echo "Running terraform validate" - find . -name "*.tf" \ - -not -path "./autogen/*" \ - -not -path "./test/fixtures/all_examples/*" \ - -not -path "./test/fixtures/shared/*" \ - -print0 \ - | xargs -0 dirname | sort | uniq | xargs -L 1 -i{} bash -c 'terraform init "{}" > /dev/null && terraform validate "{}" && terraform fmt -check=true -write=false "{}"' + set -e + echo "Running terraform validate and terraform fmt" + find_files . -name "*.tf" -print0 \ + | compat_xargs -0 -n1 dirname \ + | sort -u \ + | compat_xargs -t -n1 -i{} bash -c \ + 'terraform init "{}" > /dev/null && terraform validate "{}" && terraform fmt -check=true -write=false "{}"' } # This function runs 'go fmt' and 'go vet' on every file # that ends in '.go' function golang() { echo "Running go fmt and go vet" - find . -name "*.go" -exec go fmt {} \; - find . -name "*.go" -exec go vet {} \; + find_files . 
-name "*.go" -print0 | compat_xargs -0 -n1 go fmt + find_files . -name "*.go" -print0 | compat_xargs -0 -n1 go vet } # This function runs the flake8 linter on every file # ending in '.py' function check_python() { echo "Running flake8" - find . -name "*.py" -exec flake8 {} \; + find_files . -name "*.py" -print0 | compat_xargs -0 flake8 + return 0 } # This function runs the shellcheck linter on every # file ending in '.sh' function check_shell() { echo "Running shellcheck" - find . -name "*.sh" -exec shellcheck -x {} \; + find_files . -name "*.sh" -print0 | compat_xargs -0 shellcheck -x } # This function makes sure that there is no trailing whitespace # in any files in the project. # There are some exclusions function check_trailing_whitespace() { - echo "The following lines have trailing whitespace" - grep -r '[[:blank:]]$' --exclude-dir=".terraform" --exclude-dir=".kitchen" --exclude="*.png" --exclude="*.pyc" --exclude-dir=".git" . + local rc + echo "Checking for trailing whitespace" + find_files . -print \ + | grep -v -E '\.(pyc|png)$' \ + | compat_xargs grep -H -n '[[:blank:]]$' rc=$? - if [ $rc = 0 ]; then - exit 1 + if [[ ${rc} -eq 0 ]]; then + return 1 fi } @@ -94,18 +132,19 @@ function generate() { function generate_docs() { echo "Generating markdown docs with terraform-docs" - TMPFILE=$(mktemp) - #shellcheck disable=2006,2086 - for j in $(find ./ -name '*.tf' -type f -exec dirname '{}' \; | sort -u | grep -v ./autogen); do - if [[ -e "${j}/README.md" ]]; then + local path + while read -r path; do + if [[ -e "${path}/README.md" ]]; then # script seem to be designed to work into current directory - cd $j && echo "Working in ${j} ..." + cd "${path}" && echo "Working in ${path} ..." terraform_docs.sh . && echo Success! || echo "Warning! Exit code: ${?}" cd - >/dev/null else - echo "Skipping ${j} because README.md does not exist." + echo "Skipping ${path} because README.md does not exist." fi - done + done < <(find_files . -name '*.tf' -print0 \ + | compat_xargs -0 -n1 dirname \ + | sort -u) } function check_generate() { @@ -151,3 +190,22 @@ function check_generate_docs() { exit $rc } + +function prepare_test_variables() { + echo "Preparing terraform.tfvars files for integration tests" + #shellcheck disable=2044 + for i in $(find ./test/fixtures -type f -name terraform.tfvars.sample); do + destination=${i/%.sample/} + if [ ! -f "${destination}" ]; then + cp "${i}" "${destination}" + echo "${destination} has been created. Please edit it to reflect your GCP configuration." + fi + done +} + +function check_headers() { + echo "Checking file headers" + # Use the exclusion behavior of find_files + find_files . -type f -print0 \ + | compat_xargs -0 python test/verify_boilerplate.py +} From a3db4d8afa0bf9f9e4fcd3a7346abef44b065df4 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Wed, 10 Jul 2019 13:30:11 -0400 Subject: [PATCH 11/16] Add empty terraform.tfvars to fix broken symlinks This patch fixes the check_terraform failure caused by the absence of the symlinked terraform.tfvars. The alternative to this would be to remove the symlinks. 
From 79d454f0655c8b5eff2a65ea6263d183e98c9f2a Mon Sep 17 00:00:00 2001
From: Aaron Lane
Date: Thu, 11 Jul 2019 11:19:22 -0400
Subject: [PATCH 12/16] Fix syntax for guest_accelerator

`dynamic` cannot be used here, because guest_accelerator is an attribute
that Terraform processes in "attributes as blocks" mode.
https://www.terraform.io/docs/configuration/attr-as-blocks.html
---
 autogen/cluster_regional.tf | 13 ++++++-------
 autogen/cluster_zonal.tf    | 13 ++++++-------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/autogen/cluster_regional.tf b/autogen/cluster_regional.tf
index 96ff41bc5f..b8878e0378 100644
--- a/autogen/cluster_regional.tf
+++ b/autogen/cluster_regional.tf
@@ -271,16 +271,15 @@ resource "google_container_node_pool" "pools" {
       var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]]
     )

-    dynamic "guest_accelerator" {
-      for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{
+    guest_accelerator = [
+      for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{
         type  = lookup(var.node_pools[count.index], "accelerator_type", "")
         count = lookup(var.node_pools[count.index], "accelerator_count", 0)
-      }] : []
-      content {
-        type  = guest_accelerator.value.type
-        count = guest_accelerator.value.count
+      }] : [] : {
+        type  = guest_accelerator["type"]
+        count = guest_accelerator["count"]
       }
-    }
+    ]

 {% if beta_cluster %}
     dynamic "workload_metadata_config" {
diff --git a/autogen/cluster_zonal.tf b/autogen/cluster_zonal.tf
index 9f3d6c4273..73ca493d66 100644
--- a/autogen/cluster_zonal.tf
+++ b/autogen/cluster_zonal.tf
@@ -268,16 +268,15 @@ resource "google_container_node_pool" "zonal_pools" {
       var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]],
     )

-    dynamic "guest_accelerator" {
-      for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{
+    guest_accelerator = [
+      for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{
         type  = lookup(var.node_pools[count.index], "accelerator_type", "")
         count = lookup(var.node_pools[count.index], "accelerator_count", 0)
-      }] : []
-      content {
-        type  = guest_accelerator.value.type
-        count = guest_accelerator.value.count
+      }] : [] : {
+        type  = guest_accelerator["type"]
+        count = guest_accelerator["count"]
       }
-    }
+    ]

 {% if beta_cluster %}
     dynamic "workload_metadata_config" {
From 1313afc38bcb5920fb8c2365b258e1ac93b1ef63 Mon Sep 17 00:00:00 2001
From: Aaron Lane
Date: Thu, 11 Jul 2019 11:21:01 -0400
Subject: [PATCH 13/16] Fix out of bounds use of slice on zones

This appears to have changed from Terraform 0.11, which must have
silently swallowed the out-of-bounds error.
---
 autogen/main.tf | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/autogen/main.tf b/autogen/main.tf
index 59833e7e0c..407f65d21e 100644
--- a/autogen/main.tf
+++ b/autogen/main.tf
@@ -43,6 +43,7 @@ locals {
   custom_kube_dns_config      = length(keys(var.stub_domains)) > 0
   upstream_nameservers_config = length(var.upstream_nameservers) > 0
   network_project_id          = var.network_project_id != "" ? var.network_project_id : var.project_id
+  zone_count                  = length(var.zones)

   cluster_type = var.regional ?
"regional" : "zonal" @@ -84,7 +85,7 @@ locals { } cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) - cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) + cluster_type_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_type_output_zones = { regional = local.cluster_type_output_regional_zones @@ -306,7 +307,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] + zone = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] project = var.project_id } From 121a505e8c5c3a78fd3d81142c6a709e0eb33fab Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Thu, 11 Jul 2019 11:36:47 -0400 Subject: [PATCH 14/16] Regenerate modules --- cluster_regional.tf | 13 ++++++------- cluster_zonal.tf | 13 ++++++------- main.tf | 5 +++-- modules/beta-private-cluster/cluster_regional.tf | 13 ++++++------- modules/beta-private-cluster/cluster_zonal.tf | 13 ++++++------- modules/beta-private-cluster/main.tf | 5 +++-- modules/beta-public-cluster/cluster_regional.tf | 13 ++++++------- modules/beta-public-cluster/cluster_zonal.tf | 13 ++++++------- modules/beta-public-cluster/main.tf | 5 +++-- modules/private-cluster/cluster_regional.tf | 13 ++++++------- modules/private-cluster/cluster_zonal.tf | 13 ++++++------- modules/private-cluster/main.tf | 5 +++-- 12 files changed, 60 insertions(+), 64 deletions(-) diff --git a/cluster_regional.tf b/cluster_regional.tf index b23b25ef08..66c3ec33a8 100644 --- a/cluster_regional.tf +++ b/cluster_regional.tf @@ -213,16 +213,15 @@ resource "google_container_node_pool" "pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] } lifecycle { diff --git a/cluster_zonal.tf b/cluster_zonal.tf index 13cdb99ace..1bf674f82d 100644 --- a/cluster_zonal.tf +++ b/cluster_zonal.tf @@ -211,16 +211,15 @@ resource "google_container_node_pool" "zonal_pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? 
[{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] } lifecycle { diff --git a/main.tf b/main.tf index 371cb4c2ea..218babf66a 100644 --- a/main.tf +++ b/main.tf @@ -39,6 +39,7 @@ locals { custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) cluster_type = var.regional ? "regional" : "zonal" @@ -72,7 +73,7 @@ locals { } cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) - cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) + cluster_type_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_type_output_zones = { regional = local.cluster_type_output_regional_zones @@ -266,7 +267,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] + zone = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] project = var.project_id } diff --git a/modules/beta-private-cluster/cluster_regional.tf b/modules/beta-private-cluster/cluster_regional.tf index 87eb56365e..33b07af674 100644 --- a/modules/beta-private-cluster/cluster_regional.tf +++ b/modules/beta-private-cluster/cluster_regional.tf @@ -257,16 +257,15 @@ resource "google_container_node_pool" "pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] dynamic "workload_metadata_config" { for_each = local.cluster_node_metadata_config diff --git a/modules/beta-private-cluster/cluster_zonal.tf b/modules/beta-private-cluster/cluster_zonal.tf index afbc294f0a..baff6adca7 100644 --- a/modules/beta-private-cluster/cluster_zonal.tf +++ b/modules/beta-private-cluster/cluster_zonal.tf @@ -254,16 +254,15 @@ resource "google_container_node_pool" "zonal_pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? 
[{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] dynamic "workload_metadata_config" { for_each = local.cluster_node_metadata_config diff --git a/modules/beta-private-cluster/main.tf b/modules/beta-private-cluster/main.tf index 4120899f43..91d4722e55 100644 --- a/modules/beta-private-cluster/main.tf +++ b/modules/beta-private-cluster/main.tf @@ -39,6 +39,7 @@ locals { custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) cluster_type = var.regional ? "regional" : "zonal" @@ -78,7 +79,7 @@ locals { } cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) - cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) + cluster_type_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_type_output_zones = { regional = local.cluster_type_output_regional_zones @@ -286,7 +287,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] + zone = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] project = var.project_id } diff --git a/modules/beta-public-cluster/cluster_regional.tf b/modules/beta-public-cluster/cluster_regional.tf index bee29c5c00..a56ebb5634 100644 --- a/modules/beta-public-cluster/cluster_regional.tf +++ b/modules/beta-public-cluster/cluster_regional.tf @@ -251,16 +251,15 @@ resource "google_container_node_pool" "pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] dynamic "workload_metadata_config" { for_each = local.cluster_node_metadata_config diff --git a/modules/beta-public-cluster/cluster_zonal.tf b/modules/beta-public-cluster/cluster_zonal.tf index c35f11818e..39e7a1a1fb 100644 --- a/modules/beta-public-cluster/cluster_zonal.tf +++ b/modules/beta-public-cluster/cluster_zonal.tf @@ -249,16 +249,15 @@ resource "google_container_node_pool" "zonal_pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? 
[{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] dynamic "workload_metadata_config" { for_each = local.cluster_node_metadata_config diff --git a/modules/beta-public-cluster/main.tf b/modules/beta-public-cluster/main.tf index f4f27751fb..0bc2bbcd1c 100644 --- a/modules/beta-public-cluster/main.tf +++ b/modules/beta-public-cluster/main.tf @@ -39,6 +39,7 @@ locals { custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) cluster_type = var.regional ? "regional" : "zonal" @@ -78,7 +79,7 @@ locals { } cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) - cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) + cluster_type_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_type_output_zones = { regional = local.cluster_type_output_regional_zones @@ -288,7 +289,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] + zone = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] project = var.project_id } diff --git a/modules/private-cluster/cluster_regional.tf b/modules/private-cluster/cluster_regional.tf index f3ed67b0a4..7e12240b1d 100644 --- a/modules/private-cluster/cluster_regional.tf +++ b/modules/private-cluster/cluster_regional.tf @@ -219,16 +219,15 @@ resource "google_container_node_pool" "pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]] ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] } lifecycle { diff --git a/modules/private-cluster/cluster_zonal.tf b/modules/private-cluster/cluster_zonal.tf index 9b44c6d478..0109263b07 100644 --- a/modules/private-cluster/cluster_zonal.tf +++ b/modules/private-cluster/cluster_zonal.tf @@ -216,16 +216,15 @@ resource "google_container_node_pool" "zonal_pools" { var.node_pools_oauth_scopes[var.node_pools[count.index]["name"]], ) - dynamic "guest_accelerator" { - for_each = lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? [{ + guest_accelerator = [ + for guest_accelerator in lookup(var.node_pools[count.index], "accelerator_count", 0) > 0 ? 
[{ type = lookup(var.node_pools[count.index], "accelerator_type", "") count = lookup(var.node_pools[count.index], "accelerator_count", 0) - }] : [] - content { - type = guest_accelerator.value.type - count = guest_accelerator.value.count + }] : [] : { + type = guest_accelerator["type"] + count = guest_accelerator["count"] } - } + ] } lifecycle { diff --git a/modules/private-cluster/main.tf b/modules/private-cluster/main.tf index b71b79b18a..d39207827d 100644 --- a/modules/private-cluster/main.tf +++ b/modules/private-cluster/main.tf @@ -39,6 +39,7 @@ locals { custom_kube_dns_config = length(keys(var.stub_domains)) > 0 upstream_nameservers_config = length(var.upstream_nameservers) > 0 network_project_id = var.network_project_id != "" ? var.network_project_id : var.project_id + zone_count = length(var.zones) cluster_type = var.regional ? "regional" : "zonal" @@ -72,7 +73,7 @@ locals { } cluster_type_output_regional_zones = flatten(google_container_cluster.primary.*.node_locations) - cluster_type_output_zonal_zones = slice(var.zones, 1, length(var.zones)) + cluster_type_output_zonal_zones = local.zone_count > 1 ? slice(var.zones, 1, local.zone_count) : [] cluster_type_output_zones = { regional = local.cluster_type_output_regional_zones @@ -264,7 +265,7 @@ data "google_container_engine_versions" "zone" { // // data.google_container_engine_versions.zone: Cannot determine zone: set in this resource, or set provider-level zone. // - zone = var.zones[0] == "" ? data.google_compute_zones.available.names[0] : var.zones[0] + zone = local.zone_count == 0 ? data.google_compute_zones.available.names[0] : var.zones[0] project = var.project_id } From 555dcc1970a16fff66bb4304cfc9b8b9c1a7b689 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Thu, 11 Jul 2019 12:10:10 -0400 Subject: [PATCH 15/16] Add missing license headers --- .../stub_domains_upstream_nameservers/versions.tf | 15 +++++++++++++++ test/fixtures/upstream_nameservers/versions.tf | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/test/fixtures/stub_domains_upstream_nameservers/versions.tf b/test/fixtures/stub_domains_upstream_nameservers/versions.tf index ac97c6ac8e..832ec1df39 100644 --- a/test/fixtures/stub_domains_upstream_nameservers/versions.tf +++ b/test/fixtures/stub_domains_upstream_nameservers/versions.tf @@ -1,3 +1,18 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ terraform { required_version = ">= 0.12" diff --git a/test/fixtures/upstream_nameservers/versions.tf b/test/fixtures/upstream_nameservers/versions.tf index ac97c6ac8e..832ec1df39 100644 --- a/test/fixtures/upstream_nameservers/versions.tf +++ b/test/fixtures/upstream_nameservers/versions.tf @@ -1,3 +1,18 @@ +/** + * Copyright 2018 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ terraform { required_version = ">= 0.12" From a807f21fc9b43684a8e59feaf4653ab6d4816596 Mon Sep 17 00:00:00 2001 From: Aaron Lane Date: Thu, 11 Jul 2019 14:41:36 -0400 Subject: [PATCH 16/16] Add note about Terraform compatibility --- README.md | 10 ++++++++++ autogen/README.md | 10 ++++++++++ modules/beta-private-cluster/README.md | 10 ++++++++++ modules/beta-public-cluster/README.md | 10 ++++++++++ modules/private-cluster/README.md | 10 ++++++++++ 5 files changed, 50 insertions(+) diff --git a/README.md b/README.md index 5dd2adccbf..5d09951671 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,14 @@ The resources/services/activations/deletions that this module will create/trigge - Activate network policy if `network_policy` is true - Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `network_policy` is true + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + ## Usage There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: @@ -373,3 +381,5 @@ command. [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: docs/upgrading_to_v3.0.md [terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/autogen/README.md b/autogen/README.md index 0e5d22cfe5..85c803578a 100644 --- a/autogen/README.md +++ b/autogen/README.md @@ -13,6 +13,14 @@ The resources/services/activations/deletions that this module will create/trigge **Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. {% endif %} + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + ## Usage There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: @@ -332,3 +340,5 @@ command. 
{% else %} [terraform-provider-google]: https://github.com/terraform-providers/terraform-provider-google {% endif %} +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 495cd14289..475159e1a6 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -10,6 +10,14 @@ The resources/services/activations/deletions that this module will create/trigge **Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + ## Usage There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: @@ -393,3 +401,5 @@ command. [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index 915e779c88..32df7c1856 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -8,6 +8,14 @@ The resources/services/activations/deletions that this module will create/trigge - Activate network policy if `network_policy` is true - Add `ip-masq-agent` configmap with provided `non_masquerade_cidrs` if `network_policy` is true + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. + ## Usage There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: @@ -384,3 +392,5 @@ command. [upgrading-to-v2.0]: docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md index ddf24b6d4d..906a826a94 100644 --- a/modules/private-cluster/README.md +++ b/modules/private-cluster/README.md @@ -10,6 +10,14 @@ The resources/services/activations/deletions that this module will create/trigge **Note**: You must run Terraform from a VM on the same VPC as your cluster, otherwise there will be issues connecting to the GKE master. + +## Compatibility + +This module is meant for use with Terraform 0.12. If you haven't +[upgraded][terraform-0.12-upgrade] and need a Terraform +0.11.x-compatible version of this module, the last released version +intended for Terraform 0.11.x is [3.0.0]. 
+ ## Usage There are multiple examples included in the [examples](./examples/) folder but simple usage is as follows: @@ -382,3 +390,5 @@ command. [upgrading-to-v2.0]: ../../docs/upgrading_to_v2.0.md [upgrading-to-v3.0]: ../../docs/upgrading_to_v3.0.md [terraform-provider-google-beta]: https://github.com/terraform-providers/terraform-provider-google-beta +[3.0.0]: https://registry.terraform.io/modules/terraform-google-modules/kubernetes-engine/google/3.0.0 +[terraform-0.12-upgrade]: https://www.terraform.io/upgrade-guides/0-12.html
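To make the compatibility note concrete, a root module consuming this release might pin versions roughly as follows; the project and cluster values are hypothetical, and `~> 4.0` assumes the unreleased [4.0.0] entry tracked in the CHANGELOG ships as the first 0.12-only version:

```hcl
terraform {
  # Matches the required_version constraint added in versions.tf above.
  required_version = ">= 0.12"
}

module "gke" {
  source  = "terraform-google-modules/kubernetes-engine/google"
  version = "~> 4.0" # Terraform 0.11.x users should pin version = "3.0.0" instead.

  project_id        = "example-project"
  name              = "example-cluster"
  region            = "us-central1"
  network           = "example-network"
  subnetwork        = "example-subnet"
  ip_range_pods     = "example-pods"
  ip_range_services = "example-services"
}
```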