From 1d8d7f2de4e8e5258b00a67f894f519a17574c3c Mon Sep 17 00:00:00 2001 From: Felipe Esteves Date: Fri, 24 Feb 2023 09:57:26 -0300 Subject: [PATCH] Set blue green defaults null to use provider default --- README.md | 14 +++--- autogen/main/README.md | 14 +++--- autogen/main/cluster.tf.tmpl | 6 +-- autogen/main/variables.tf.tmpl | 43 +------------------ cluster.tf | 12 +++--- .../README.md | 21 +++------ .../cluster.tf | 12 +++--- .../variables.tf | 43 +------------------ modules/beta-private-cluster/README.md | 21 +++------ modules/beta-private-cluster/cluster.tf | 12 +++--- modules/beta-private-cluster/variables.tf | 43 +------------------ .../README.md | 21 +++------ .../cluster.tf | 12 +++--- .../variables.tf | 43 +------------------ modules/beta-public-cluster/README.md | 21 +++------ modules/beta-public-cluster/cluster.tf | 12 +++--- modules/beta-public-cluster/variables.tf | 43 +------------------ .../private-cluster-update-variant/README.md | 14 +++--- .../private-cluster-update-variant/cluster.tf | 12 +++--- modules/private-cluster/README.md | 14 +++--- modules/private-cluster/cluster.tf | 12 +++--- 21 files changed, 106 insertions(+), 339 deletions(-) diff --git a/README.md b/README.md index 0a10dac9ce..f5ec9a0fef 100644 --- a/README.md +++ b/README.md @@ -278,13 +278,13 @@ The node_pools variable takes the following parameters: | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional | | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional | | strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional | -| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional | -| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional | -| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional | -| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional | -| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional | -| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional | -| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | +| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional | +| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. 
Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional | +| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. | "3600s" | Optional | +| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional | +| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional | +| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional | +| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional | | total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional | | name | The name of the node pool | | Required | | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required | diff --git a/autogen/main/README.md b/autogen/main/README.md index b69a6dc13e..df40ab99af 100644 --- a/autogen/main/README.md +++ b/autogen/main/README.md @@ -214,13 +214,13 @@ The node_pools variable takes the following parameters: | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional | | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional | | strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional | -| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional | -| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional | -| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional | -| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional | -| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional | -| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. 
Cannot be used together with `batch_node_count` | null | Optional | -| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | +| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional | +| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional | +| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. | "3600s" | Optional | +| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional | +| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional | +| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional | +| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional | | total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional | | name | The name of the node pool | | Required | {% if beta_cluster %} diff --git a/autogen/main/cluster.tf.tmpl b/autogen/main/cluster.tf.tmpl index 27bce7243a..a2bca488da 100644 --- a/autogen/main/cluster.tf.tmpl +++ b/autogen/main/cluster.tf.tmpl @@ -702,12 +702,12 @@ resource "google_container_node_pool" "windows_pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? 
[1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } diff --git a/autogen/main/variables.tf.tmpl b/autogen/main/variables.tf.tmpl index 1b324fa084..c0f83fba1f 100644 --- a/autogen/main/variables.tf.tmpl +++ b/autogen/main/variables.tf.tmpl @@ -734,6 +734,7 @@ variable "enable_pod_security_policy" { default = false } + variable "enable_l4_ilb_subsetting" { type = bool description = "Enable L4 ILB Subsetting on the cluster" @@ -757,47 +758,5 @@ variable "enable_identity_service" { description = "Enable the Identity Service component, which allows customers to use external identity providers with the K8S API." default = false } - -variable "strategy" { - type = string - description = "The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)" - default = "SURGE" -} - -variable "max_surge" { - type = number - description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)" - default = null -} - -variable "max_unavailable" { - type = number - description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)" - default = null -} - -variable "node_pool_soak_duration" { - type = string - description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)" - default = "3600s" -} - -variable "batch_soak_duration" { - type = string - description = "Soak time after each batch gets drained (Optionial)" - default = "0s" -} - -variable "batch_percentage" { - type = string - description = "Percentage of the blue pool nodes to drain in a batch (Optional)" - default = null -} - -variable "batch_node_count" { - type = number - description = "The number of blue nodes to drain in a batch (Optional)" - default = null -} {% endif %} {% endif %} diff --git a/cluster.tf b/cluster.tf index 01424871ce..694bc2eb05 100644 --- a/cluster.tf +++ b/cluster.tf @@ -413,12 +413,12 @@ resource "google_container_node_pool" "pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? 
[1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } @@ -599,12 +599,12 @@ resource "google_container_node_pool" "windows_pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md index 22be2b83d0..1c66e6b201 100644 --- a/modules/beta-private-cluster-update-variant/README.md +++ b/modules/beta-private-cluster-update-variant/README.md @@ -163,9 +163,6 @@ Then perform the following commands on the root folder: | add\_master\_webhook\_firewall\_rules | Create master\_webhook firewall rules for ports defined in `firewall_inbound_ports` | `bool` | `false` | no | | add\_shadow\_firewall\_rules | Create GKE shadow firewall (the same as default firewall rules with firewall logs enabled). | `bool` | `false` | no | | authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | `string` | `null` | no | -| batch\_node\_count | The number of blue nodes to drain in a batch (Optional) | `number` | `null` | no | -| batch\_percentage | Percentage of the blue pool nodes to drain in a batch (Optional) | `string` | `null` | no | -| batch\_soak\_duration | Soak time after each batch gets drained (Optionial) | `string` | `"0s"` | no | | cloudrun | (Beta) Enable CloudRun addon | `bool` | `false` | no | | cloudrun\_load\_balancer\_type | (Beta) Configure the Cloud Run load balancer type. External by default. Set to `LOAD_BALANCER_TYPE_INTERNAL` to configure as an internal load balancer. | `string` | `""` | no | | cluster\_autoscaling | Cluster autoscaling configuration. See [more details](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#clusterautoscaling) |
<pre>object({<br>  enabled = bool<br>  autoscaling_profile = string<br>  min_cpu_cores = number<br>  max_cpu_cores = number<br>  min_memory_gb = number<br>  max_memory_gb = number<br>  gpu_resources = list(object({ resource_type = string, minimum = number, maximum = number }))<br>  auto_repair = bool<br>  auto_upgrade = bool<br>})</pre> | <pre>{<br>  "auto_repair": true,<br>  "auto_upgrade": true,<br>  "autoscaling_profile": "BALANCED",<br>  "enabled": false,<br>  "gpu_resources": [],<br>  "max_cpu_cores": 0,<br>  "max_memory_gb": 0,<br>  "min_cpu_cores": 0,<br>  "min_memory_gb": 0<br>}</pre>
| no | @@ -230,8 +227,6 @@ Then perform the following commands on the root folder: | master\_authorized\_networks | List of master authorized networks. If none are provided, disallow external access (except the cluster node IPs, which GKE automatically whitelists). | `list(object({ cidr_block = string, display_name = string }))` | `[]` | no | | master\_global\_access\_enabled | Whether the cluster master is accessible globally (from any region) or only within the same region as the private endpoint. | `bool` | `true` | no | | master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | `string` | `"10.0.0.0/28"` | no | -| max\_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max\_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional) | `number` | `null` | no | -| max\_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max\_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional) | `number` | `null` | no | | monitoring\_enable\_managed\_prometheus | Configuration for Managed Service for Prometheus. Whether or not the managed collection is enabled. | `bool` | `false` | no | | monitoring\_enabled\_components | List of services to monitor: SYSTEM\_COMPONENTS, WORKLOADS (provider version >= 3.89.0). Empty list is default GKE configuration. | `list(string)` | `[]` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | `string` | `"monitoring.googleapis.com/kubernetes"` | no | @@ -241,7 +236,6 @@ Then perform the following commands on the root folder: | network\_policy\_provider | The network policy provider. | `string` | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | `string` | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | `string` | `"GKE_METADATA"` | no | -| node\_pool\_soak\_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional) | `string` | `"3600s"` | no | | node\_pools | List of maps containing node pools | `list(map(any))` |
<pre>[<br>  {<br>    "name": "default-node-pool"<br>  }<br>]</pre>
| no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | `map(map(string))` |
<pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre>
| no | | node\_pools\_linux\_node\_configs\_sysctls | Map of maps containing linux node config sysctls by node-pool name | `map(map(string))` |
<pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre>
| no | @@ -265,7 +259,6 @@ Then perform the following commands on the root folder: | shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
<pre>object({<br>  metadata = string<br>})</pre> | <pre>{<br>  "metadata": "INCLUDE_ALL_METADATA"<br>}</pre>
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | -| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional) | `string` | `"SURGE"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | @@ -350,13 +343,13 @@ The node_pools variable takes the following parameters: | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional | | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional | | strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional | -| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional | -| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional | -| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional | -| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional | -| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional | -| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional | -| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | +| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional | +| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional | +| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. 
| "3600s" | Optional | +| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional | +| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional | +| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional | +| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional | | total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf index 895c3dd345..926b3a5232 100644 --- a/modules/beta-private-cluster-update-variant/cluster.tf +++ b/modules/beta-private-cluster-update-variant/cluster.tf @@ -602,12 +602,12 @@ resource "google_container_node_pool" "pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } @@ -828,12 +828,12 @@ resource "google_container_node_pool" "windows_pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? 
[1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } diff --git a/modules/beta-private-cluster-update-variant/variables.tf b/modules/beta-private-cluster-update-variant/variables.tf index df3bb71e20..7fa57dfe6d 100644 --- a/modules/beta-private-cluster-update-variant/variables.tf +++ b/modules/beta-private-cluster-update-variant/variables.tf @@ -697,6 +697,7 @@ variable "enable_pod_security_policy" { default = false } + variable "enable_l4_ilb_subsetting" { type = bool description = "Enable L4 ILB Subsetting on the cluster" @@ -720,45 +721,3 @@ variable "enable_identity_service" { description = "Enable the Identity Service component, which allows customers to use external identity providers with the K8S API." default = false } - -variable "strategy" { - type = string - description = "The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)" - default = "SURGE" -} - -variable "max_surge" { - type = number - description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)" - default = null -} - -variable "max_unavailable" { - type = number - description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)" - default = null -} - -variable "node_pool_soak_duration" { - type = string - description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)" - default = "3600s" -} - -variable "batch_soak_duration" { - type = string - description = "Soak time after each batch gets drained (Optionial)" - default = "0s" -} - -variable "batch_percentage" { - type = string - description = "Percentage of the blue pool nodes to drain in a batch (Optional)" - default = null -} - -variable "batch_node_count" { - type = number - description = "The number of blue nodes to drain in a batch (Optional)" - default = null -} diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md index 510fb4502a..985bcb3600 100644 --- a/modules/beta-private-cluster/README.md +++ b/modules/beta-private-cluster/README.md @@ -141,9 +141,6 @@ Then perform the following commands on the root folder: | add\_master\_webhook\_firewall\_rules | Create master\_webhook firewall rules for ports defined in `firewall_inbound_ports` | `bool` | `false` | no | | add\_shadow\_firewall\_rules | Create GKE shadow firewall (the same as default firewall rules with firewall logs enabled). | `bool` | `false` | no | | authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. 
Group name must be in format gke-security-groups@yourdomain.com | `string` | `null` | no | -| batch\_node\_count | The number of blue nodes to drain in a batch (Optional) | `number` | `null` | no | -| batch\_percentage | Percentage of the blue pool nodes to drain in a batch (Optional) | `string` | `null` | no | -| batch\_soak\_duration | Soak time after each batch gets drained (Optionial) | `string` | `"0s"` | no | | cloudrun | (Beta) Enable CloudRun addon | `bool` | `false` | no | | cloudrun\_load\_balancer\_type | (Beta) Configure the Cloud Run load balancer type. External by default. Set to `LOAD_BALANCER_TYPE_INTERNAL` to configure as an internal load balancer. | `string` | `""` | no | | cluster\_autoscaling | Cluster autoscaling configuration. See [more details](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#clusterautoscaling) |
<pre>object({<br>  enabled = bool<br>  autoscaling_profile = string<br>  min_cpu_cores = number<br>  max_cpu_cores = number<br>  min_memory_gb = number<br>  max_memory_gb = number<br>  gpu_resources = list(object({ resource_type = string, minimum = number, maximum = number }))<br>  auto_repair = bool<br>  auto_upgrade = bool<br>})</pre> | <pre>{<br>  "auto_repair": true,<br>  "auto_upgrade": true,<br>  "autoscaling_profile": "BALANCED",<br>  "enabled": false,<br>  "gpu_resources": [],<br>  "max_cpu_cores": 0,<br>  "max_memory_gb": 0,<br>  "min_cpu_cores": 0,<br>  "min_memory_gb": 0<br>}</pre>
| no | @@ -208,8 +205,6 @@ Then perform the following commands on the root folder: | master\_authorized\_networks | List of master authorized networks. If none are provided, disallow external access (except the cluster node IPs, which GKE automatically whitelists). | `list(object({ cidr_block = string, display_name = string }))` | `[]` | no | | master\_global\_access\_enabled | Whether the cluster master is accessible globally (from any region) or only within the same region as the private endpoint. | `bool` | `true` | no | | master\_ipv4\_cidr\_block | (Beta) The IP range in CIDR notation to use for the hosted master network | `string` | `"10.0.0.0/28"` | no | -| max\_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max\_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional) | `number` | `null` | no | -| max\_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max\_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional) | `number` | `null` | no | | monitoring\_enable\_managed\_prometheus | Configuration for Managed Service for Prometheus. Whether or not the managed collection is enabled. | `bool` | `false` | no | | monitoring\_enabled\_components | List of services to monitor: SYSTEM\_COMPONENTS, WORKLOADS (provider version >= 3.89.0). Empty list is default GKE configuration. | `list(string)` | `[]` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | `string` | `"monitoring.googleapis.com/kubernetes"` | no | @@ -219,7 +214,6 @@ Then perform the following commands on the root folder: | network\_policy\_provider | The network policy provider. | `string` | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | `string` | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | `string` | `"GKE_METADATA"` | no | -| node\_pool\_soak\_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional) | `string` | `"3600s"` | no | | node\_pools | List of maps containing node pools | `list(map(any))` |
<pre>[<br>  {<br>    "name": "default-node-pool"<br>  }<br>]</pre>
| no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | `map(map(string))` |
<pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre>
| no | | node\_pools\_linux\_node\_configs\_sysctls | Map of maps containing linux node config sysctls by node-pool name | `map(map(string))` |
<pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre>
| no | @@ -243,7 +237,6 @@ Then perform the following commands on the root folder: | shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
<pre>object({<br>  metadata = string<br>})</pre> | <pre>{<br>  "metadata": "INCLUDE_ALL_METADATA"<br>}</pre>
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | -| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional) | `string` | `"SURGE"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | @@ -328,13 +321,13 @@ The node_pools variable takes the following parameters: | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional | | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional | | strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional | -| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional | -| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional | -| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional | -| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional | -| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional | -| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional | -| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | +| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional | +| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional | +| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. 
| "3600s" | Optional | +| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional | +| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional | +| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional | +| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional | | total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf index 7f7544518e..fc2d22d5eb 100644 --- a/modules/beta-private-cluster/cluster.tf +++ b/modules/beta-private-cluster/cluster.tf @@ -508,12 +508,12 @@ resource "google_container_node_pool" "pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } @@ -733,12 +733,12 @@ resource "google_container_node_pool" "windows_pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? 
[1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } diff --git a/modules/beta-private-cluster/variables.tf b/modules/beta-private-cluster/variables.tf index df3bb71e20..7fa57dfe6d 100644 --- a/modules/beta-private-cluster/variables.tf +++ b/modules/beta-private-cluster/variables.tf @@ -697,6 +697,7 @@ variable "enable_pod_security_policy" { default = false } + variable "enable_l4_ilb_subsetting" { type = bool description = "Enable L4 ILB Subsetting on the cluster" @@ -720,45 +721,3 @@ variable "enable_identity_service" { description = "Enable the Identity Service component, which allows customers to use external identity providers with the K8S API." default = false } - -variable "strategy" { - type = string - description = "The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)" - default = "SURGE" -} - -variable "max_surge" { - type = number - description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)" - default = null -} - -variable "max_unavailable" { - type = number - description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)" - default = null -} - -variable "node_pool_soak_duration" { - type = string - description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)" - default = "3600s" -} - -variable "batch_soak_duration" { - type = string - description = "Soak time after each batch gets drained (Optionial)" - default = "0s" -} - -variable "batch_percentage" { - type = string - description = "Percentage of the blue pool nodes to drain in a batch (Optional)" - default = null -} - -variable "batch_node_count" { - type = number - description = "The number of blue nodes to drain in a batch (Optional)" - default = null -} diff --git a/modules/beta-public-cluster-update-variant/README.md b/modules/beta-public-cluster-update-variant/README.md index b79c34cf4d..d626229292 100644 --- a/modules/beta-public-cluster-update-variant/README.md +++ b/modules/beta-public-cluster-update-variant/README.md @@ -157,9 +157,6 @@ Then perform the following commands on the root folder: | add\_master\_webhook\_firewall\_rules | Create master\_webhook firewall rules for ports defined in `firewall_inbound_ports` | `bool` | `false` | no | | add\_shadow\_firewall\_rules | Create GKE shadow firewall (the same as default firewall rules with firewall logs enabled). | `bool` | `false` | no | | authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. 
Group name must be in format gke-security-groups@yourdomain.com | `string` | `null` | no | -| batch\_node\_count | The number of blue nodes to drain in a batch (Optional) | `number` | `null` | no | -| batch\_percentage | Percentage of the blue pool nodes to drain in a batch (Optional) | `string` | `null` | no | -| batch\_soak\_duration | Soak time after each batch gets drained (Optionial) | `string` | `"0s"` | no | | cloudrun | (Beta) Enable CloudRun addon | `bool` | `false` | no | | cloudrun\_load\_balancer\_type | (Beta) Configure the Cloud Run load balancer type. External by default. Set to `LOAD_BALANCER_TYPE_INTERNAL` to configure as an internal load balancer. | `string` | `""` | no | | cluster\_autoscaling | Cluster autoscaling configuration. See [more details](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#clusterautoscaling) |
<pre>object({<br>  enabled = bool<br>  autoscaling_profile = string<br>  min_cpu_cores = number<br>  max_cpu_cores = number<br>  min_memory_gb = number<br>  max_memory_gb = number<br>  gpu_resources = list(object({ resource_type = string, minimum = number, maximum = number }))<br>  auto_repair = bool<br>  auto_upgrade = bool<br>})</pre> | <pre>{<br>  "auto_repair": true,<br>  "auto_upgrade": true,<br>  "autoscaling_profile": "BALANCED",<br>  "enabled": false,<br>  "gpu_resources": [],<br>  "max_cpu_cores": 0,<br>  "max_memory_gb": 0,<br>  "min_cpu_cores": 0,<br>  "min_memory_gb": 0<br>}</pre>
| no | @@ -219,8 +216,6 @@ Then perform the following commands on the root folder: | maintenance\_recurrence | Frequency of the recurring maintenance window in RFC5545 format. | `string` | `""` | no | | maintenance\_start\_time | Time window specified for daily or recurring maintenance operations in RFC3339 format | `string` | `"05:00"` | no | | master\_authorized\_networks | List of master authorized networks. If none are provided, disallow external access (except the cluster node IPs, which GKE automatically whitelists). | `list(object({ cidr_block = string, display_name = string }))` | `[]` | no | -| max\_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max\_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional) | `number` | `null` | no | -| max\_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max\_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional) | `number` | `null` | no | | monitoring\_enable\_managed\_prometheus | Configuration for Managed Service for Prometheus. Whether or not the managed collection is enabled. | `bool` | `false` | no | | monitoring\_enabled\_components | List of services to monitor: SYSTEM\_COMPONENTS, WORKLOADS (provider version >= 3.89.0). Empty list is default GKE configuration. | `list(string)` | `[]` | no | | monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | `string` | `"monitoring.googleapis.com/kubernetes"` | no | @@ -230,7 +225,6 @@ Then perform the following commands on the root folder: | network\_policy\_provider | The network policy provider. | `string` | `"CALICO"` | no | | network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | `string` | `""` | no | | node\_metadata | Specifies how node metadata is exposed to the workload running on the node | `string` | `"GKE_METADATA"` | no | -| node\_pool\_soak\_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional) | `string` | `"3600s"` | no | | node\_pools | List of maps containing node pools | `list(map(any))` |
<pre>[<br>  {<br>    "name": "default-node-pool"<br>  }<br>]</pre>
| no | | node\_pools\_labels | Map of maps containing node labels by node-pool name | `map(map(string))` |
<pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre>
| no | | node\_pools\_linux\_node\_configs\_sysctls | Map of maps containing linux node config sysctls by node-pool name | `map(map(string))` |
<pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre>
| no | @@ -254,7 +248,6 @@ Then perform the following commands on the root folder: | shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. |
<pre>object({<br>  metadata = string<br>})</pre> | <pre>{<br>  "metadata": "INCLUDE_ALL_METADATA"<br>}</pre>
| no | | shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no | | skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no | -| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional) | `string` | `"SURGE"` | no | | stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no | | subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes | | timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no | @@ -337,13 +330,13 @@ The node_pools variable takes the following parameters: | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional | | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional | | strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional | -| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional | -| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional | -| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional | -| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional | -| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional | -| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional | -| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional | +| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional | +| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional | +| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. 
| "3600s" | Optional | +| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional | +| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional | +| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional | +| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional | | total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional | | name | The name of the node pool | | Required | | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional | diff --git a/modules/beta-public-cluster-update-variant/cluster.tf b/modules/beta-public-cluster-update-variant/cluster.tf index b60d577405..84087f07d3 100644 --- a/modules/beta-public-cluster-update-variant/cluster.tf +++ b/modules/beta-public-cluster-update-variant/cluster.tf @@ -582,12 +582,12 @@ resource "google_container_node_pool" "pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } @@ -807,12 +807,12 @@ resource "google_container_node_pool" "windows_pools" { dynamic "blue_green_settings" { for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? 
[1] : [] content { - node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s") + node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null) standard_rollout_policy { - batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s") + batch_soak_duration = lookup(each.value, "batch_soak_duration", null) batch_percentage = lookup(each.value, "batch_percentage", null) - batch_node_count = lookup(each.value, "batch_node_count", 1) + batch_node_count = lookup(each.value, "batch_node_count", null) } } } diff --git a/modules/beta-public-cluster-update-variant/variables.tf b/modules/beta-public-cluster-update-variant/variables.tf index 6a1e56e88f..970d25f40d 100644 --- a/modules/beta-public-cluster-update-variant/variables.tf +++ b/modules/beta-public-cluster-update-variant/variables.tf @@ -667,6 +667,7 @@ variable "enable_pod_security_policy" { default = false } + variable "enable_l4_ilb_subsetting" { type = bool description = "Enable L4 ILB Subsetting on the cluster" @@ -690,45 +691,3 @@ variable "enable_identity_service" { description = "Enable the Identity Service component, which allows customers to use external identity providers with the K8S API." default = false } - -variable "strategy" { - type = string - description = "The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)" - default = "SURGE" -} - -variable "max_surge" { - type = number - description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)" - default = null -} - -variable "max_unavailable" { - type = number - description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)" - default = null -} - -variable "node_pool_soak_duration" { - type = string - description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)" - default = "3600s" -} - -variable "batch_soak_duration" { - type = string - description = "Soak time after each batch gets drained (Optionial)" - default = "0s" -} - -variable "batch_percentage" { - type = string - description = "Percentage of the blue pool nodes to drain in a batch (Optional)" - default = null -} - -variable "batch_node_count" { - type = number - description = "The number of blue nodes to drain in a batch (Optional)" - default = null -} diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md index e8130450f0..128dc6f6c9 100644 --- a/modules/beta-public-cluster/README.md +++ b/modules/beta-public-cluster/README.md @@ -135,9 +135,6 @@ Then perform the following commands on the root folder: | add\_master\_webhook\_firewall\_rules | Create master\_webhook firewall rules for ports defined in `firewall_inbound_ports` | `bool` | `false` | no | | add\_shadow\_firewall\_rules | Create GKE shadow firewall (the same as default firewall rules with firewall logs enabled). | `bool` | `false` | no | | authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. 
diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md
index e8130450f0..128dc6f6c9 100644
--- a/modules/beta-public-cluster/README.md
+++ b/modules/beta-public-cluster/README.md
@@ -135,9 +135,6 @@ Then perform the following commands on the root folder:
| add\_master\_webhook\_firewall\_rules | Create master\_webhook firewall rules for ports defined in `firewall_inbound_ports` | `bool` | `false` | no |
| add\_shadow\_firewall\_rules | Create GKE shadow firewall (the same as default firewall rules with firewall logs enabled). | `bool` | `false` | no |
| authenticator\_security\_group | The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com | `string` | `null` | no |
-| batch\_node\_count | The number of blue nodes to drain in a batch (Optional) | `number` | `null` | no |
-| batch\_percentage | Percentage of the blue pool nodes to drain in a batch (Optional) | `string` | `null` | no |
-| batch\_soak\_duration | Soak time after each batch gets drained (Optionial) | `string` | `"0s"` | no |
| cloudrun | (Beta) Enable CloudRun addon | `bool` | `false` | no |
| cloudrun\_load\_balancer\_type | (Beta) Configure the Cloud Run load balancer type. External by default. Set to `LOAD_BALANCER_TYPE_INTERNAL` to configure as an internal load balancer. | `string` | `""` | no |
| cluster\_autoscaling | Cluster autoscaling configuration. See [more details](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#clusterautoscaling) | <pre>object({<br>    enabled = bool<br>    autoscaling_profile = string<br>    min_cpu_cores = number<br>    max_cpu_cores = number<br>    min_memory_gb = number<br>    max_memory_gb = number<br>    gpu_resources = list(object({ resource_type = string, minimum = number, maximum = number }))<br>    auto_repair = bool<br>    auto_upgrade = bool<br>  })</pre> | <pre>{<br>  "auto_repair": true,<br>  "auto_upgrade": true,<br>  "autoscaling_profile": "BALANCED",<br>  "enabled": false,<br>  "gpu_resources": [],<br>  "max_cpu_cores": 0,<br>  "max_memory_gb": 0,<br>  "min_cpu_cores": 0,<br>  "min_memory_gb": 0<br>}</pre> | no |
@@ -197,8 +194,6 @@ Then perform the following commands on the root folder:
| maintenance\_recurrence | Frequency of the recurring maintenance window in RFC5545 format. | `string` | `""` | no |
| maintenance\_start\_time | Time window specified for daily or recurring maintenance operations in RFC3339 format | `string` | `"05:00"` | no |
| master\_authorized\_networks | List of master authorized networks. If none are provided, disallow external access (except the cluster node IPs, which GKE automatically whitelists). | `list(object({ cidr_block = string, display_name = string }))` | `[]` | no |
-| max\_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max\_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional) | `number` | `null` | no |
-| max\_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max\_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional) | `number` | `null` | no |
| monitoring\_enable\_managed\_prometheus | Configuration for Managed Service for Prometheus. Whether or not the managed collection is enabled. | `bool` | `false` | no |
| monitoring\_enabled\_components | List of services to monitor: SYSTEM\_COMPONENTS, WORKLOADS (provider version >= 3.89.0). Empty list is default GKE configuration. | `list(string)` | `[]` | no |
| monitoring\_service | The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com, monitoring.googleapis.com/kubernetes (beta) and none | `string` | `"monitoring.googleapis.com/kubernetes"` | no |
@@ -208,7 +203,6 @@ Then perform the following commands on the root folder:
| network\_policy\_provider | The network policy provider. | `string` | `"CALICO"` | no |
| network\_project\_id | The project ID of the shared VPC's host (for shared vpc support) | `string` | `""` | no |
| node\_metadata | Specifies how node metadata is exposed to the workload running on the node | `string` | `"GKE_METADATA"` | no |
-| node\_pool\_soak\_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional) | `string` | `"3600s"` | no |
| node\_pools | List of maps containing node pools | `list(map(any))` | <pre>[<br>  {<br>    "name": "default-node-pool"<br>  }<br>]</pre> | no |
| node\_pools\_labels | Map of maps containing node labels by node-pool name | `map(map(string))` | <pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre> | no |
| node\_pools\_linux\_node\_configs\_sysctls | Map of maps containing linux node config sysctls by node-pool name | `map(map(string))` | <pre>{<br>  "all": {},<br>  "default-node-pool": {}<br>}</pre> | no |
@@ -232,7 +226,6 @@ Then perform the following commands on the root folder:
| shadow\_firewall\_rules\_log\_config | The log\_config for shadow firewall rules. You can set this variable to `null` to disable logging. | <pre>object({<br>    metadata = string<br>  })</pre> | <pre>{<br>  "metadata": "INCLUDE_ALL_METADATA"<br>}</pre> | no |
| shadow\_firewall\_rules\_priority | The firewall priority of GKE shadow firewall rules. The priority should be less than default firewall, which is 1000. | `number` | `999` | no |
| skip\_provisioners | Flag to skip all local-exec provisioners. It breaks `stub_domains` and `upstream_nameservers` variables functionality. | `bool` | `false` | no |
-| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional) | `string` | `"SURGE"` | no |
| stub\_domains | Map of stub domains and their resolvers to forward DNS queries for a certain domain to an external DNS server | `map(list(string))` | `{}` | no |
| subnetwork | The subnetwork to host the cluster in (required) | `string` | n/a | yes |
| timeouts | Timeout for cluster operations. | `map(string)` | `{}` | no |
@@ -315,13 +308,13 @@ The node_pools variable takes the following parameters:
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
-| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
-| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional |
-| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional |
-| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional |
-| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |
+| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional |
+| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. | "3600s" | Optional |
+| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional |
+| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional |
+| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
| name | The name of the node pool | | Required |
| placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |
diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf
index 5149f82ff4..99a0f86e15 100644
--- a/modules/beta-public-cluster/cluster.tf
+++ b/modules/beta-public-cluster/cluster.tf
@@ -488,12 +488,12 @@ resource "google_container_node_pool" "pools" {
      dynamic "blue_green_settings" {
        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
        content {
-         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
+         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

          standard_rollout_policy {
-           batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
+           batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
            batch_percentage    = lookup(each.value, "batch_percentage", null)
-           batch_node_count    = lookup(each.value, "batch_node_count", 1)
+           batch_node_count    = lookup(each.value, "batch_node_count", null)
          }
        }
      }
@@ -712,12 +712,12 @@ resource "google_container_node_pool" "windows_pools" {
      dynamic "blue_green_settings" {
        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
        content {
-         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
+         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

          standard_rollout_policy {
-           batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
+           batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
            batch_percentage    = lookup(each.value, "batch_percentage", null)
-           batch_node_count    = lookup(each.value, "batch_node_count", 1)
+           batch_node_count    = lookup(each.value, "batch_node_count", null)
          }
        }
      }
diff --git a/modules/beta-public-cluster/variables.tf b/modules/beta-public-cluster/variables.tf
index 6a1e56e88f..970d25f40d 100644
--- a/modules/beta-public-cluster/variables.tf
+++ b/modules/beta-public-cluster/variables.tf
@@ -667,6 +667,7 @@ variable "enable_pod_security_policy" {
  default     = false
}

+
variable "enable_l4_ilb_subsetting" {
  type        = bool
  description = "Enable L4 ILB Subsetting on the cluster"
@@ -690,45 +691,3 @@ variable "enable_identity_service" {
  description = "Enable the Identity Service component, which allows customers to use external identity providers with the K8S API."
  default     = false
}
-
-variable "strategy" {
-  type        = string
-  description = "The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`; `BLUE_GREEN`. By default strategy is `SURGE` (Optional)"
-  default     = "SURGE"
-}
-
-variable "max_surge" {
-  type        = number
-  description = "The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater (Optional)"
-  default     = null
-}
-
-variable "max_unavailable" {
-  type        = number
-  description = "The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater (Optional)"
-  default     = null
-}
-
-variable "node_pool_soak_duration" {
-  type        = string
-  description = "Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up (Optional)"
-  default     = "3600s"
-}
-
-variable "batch_soak_duration" {
-  type        = string
-  description = "Soak time after each batch gets drained (Optionial)"
-  default     = "0s"
-}
-
-variable "batch_percentage" {
-  type        = string
-  description = "Percentage of the blue pool nodes to drain in a batch (Optional)"
-  default     = null
-}
-
-variable "batch_node_count" {
-  type        = number
-  description = "The number of blue nodes to drain in a batch (Optional)"
-  default     = null
-}
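The substance of the change is `lookup()`'s fallback argument: when a pool map omits a key, `lookup(map, key, null)` returns `null`, and Terraform treats a `null` resource argument as unset, which defers the value to the provider default. A small sketch of the semantics, using a hypothetical `local.pool`:

```hcl
locals {
  # Hypothetical pool map with no batch_soak_duration key.
  pool = {
    name     = "bg-pool"
    strategy = "BLUE_GREEN"
  }

  # Before: lookup(local.pool, "batch_soak_duration", "0s")  => "0s" (module forces a value)
  # After:  lookup(local.pool, "batch_soak_duration", null)  => null (argument left unset,
  #         so the google-beta provider / GKE API default wins)
  batch_soak_duration = lookup(local.pool, "batch_soak_duration", null)
}
```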
diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md
index 67fd459cbf..e18aee380a 100644
--- a/modules/private-cluster-update-variant/README.md
+++ b/modules/private-cluster-update-variant/README.md
@@ -313,13 +313,13 @@ The node_pools variable takes the following parameters:
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
-| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
-| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional |
-| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional |
-| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional |
-| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |
+| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional |
+| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. | "3600s" | Optional |
+| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional |
+| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional |
+| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
| name | The name of the node pool | | Required |
| node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required |
diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf
index 077f6983e3..adfea3660a 100644
--- a/modules/private-cluster-update-variant/cluster.tf
+++ b/modules/private-cluster-update-variant/cluster.tf
@@ -526,12 +526,12 @@ resource "google_container_node_pool" "pools" {
      dynamic "blue_green_settings" {
        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
        content {
-         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
+         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

          standard_rollout_policy {
-           batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
+           batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
            batch_percentage    = lookup(each.value, "batch_percentage", null)
-           batch_node_count    = lookup(each.value, "batch_node_count", 1)
+           batch_node_count    = lookup(each.value, "batch_node_count", null)
          }
        }
      }
@@ -713,12 +713,12 @@ resource "google_container_node_pool" "windows_pools" {
      dynamic "blue_green_settings" {
        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
        content {
-         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
+         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

          standard_rollout_policy {
-           batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
+           batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
            batch_percentage    = lookup(each.value, "batch_percentage", null)
-           batch_node_count    = lookup(each.value, "batch_node_count", 1)
+           batch_node_count    = lookup(each.value, "batch_node_count", null)
          }
        }
      }
diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md
index 88fe8f8629..9ee6ffdef6 100644
--- a/modules/private-cluster/README.md
+++ b/modules/private-cluster/README.md
@@ -291,13 +291,13 @@ The node_pools variable takes the following parameters:
| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
| max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
-| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
-| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). | "3600s" | Optional |
-| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. | "0s" | Optional |
-| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage` | 1 | Optional |
-| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count` | null | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |
+| max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. Only works with `SURGE` strategy. | 0 | Optional |
+| node_pool_soak_duration | Time needed after draining the entire blue pool. After this period, the blue pool will be cleaned up. By default, it is set to one hour (3600 seconds). The maximum length of the soak time is 7 days (604,800 seconds). Only works with `BLUE_GREEN` strategy. | "3600s" | Optional |
+| batch_soak_duration | Soak time after each batch gets drained, with the default being zero seconds. Only works with `BLUE_GREEN` strategy. | "0s" | Optional |
+| batch_node_count | Absolute number of nodes to drain in a batch. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_percentage`. Only works with `BLUE_GREEN` strategy. | 1 | Optional |
+| batch_percentage | Percentage of nodes to drain in a batch. Must be in the range of [0.0, 1.0]. If it is set to zero, this phase will be skipped. Cannot be used together with `batch_node_count`. Only works with `BLUE_GREEN` strategy. | null | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
| name | The name of the node pool | | Required |
| node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required |
diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf
index b6399c512d..ae67d7d403 100644
--- a/modules/private-cluster/cluster.tf
+++ b/modules/private-cluster/cluster.tf
@@ -432,12 +432,12 @@ resource "google_container_node_pool" "pools" {
      dynamic "blue_green_settings" {
        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
        content {
-         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
+         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

          standard_rollout_policy {
-           batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
+           batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
            batch_percentage    = lookup(each.value, "batch_percentage", null)
-           batch_node_count    = lookup(each.value, "batch_node_count", 1)
+           batch_node_count    = lookup(each.value, "batch_node_count", null)
          }
        }
      }
@@ -618,12 +618,12 @@ resource "google_container_node_pool" "windows_pools" {
      dynamic "blue_green_settings" {
        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
        content {
-         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", "3600s")
+         node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

          standard_rollout_policy {
-           batch_soak_duration = lookup(each.value, "batch_soak_duration", "0s")
+           batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
            batch_percentage    = lookup(each.value, "batch_percentage", null)
-           batch_node_count    = lookup(each.value, "batch_node_count", 1)
+           batch_node_count    = lookup(each.value, "batch_node_count", null)
          }
        }
      }
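As the tables above note, `batch_percentage` and `batch_node_count` cannot be combined: to drain by fraction rather than by node count, set only the percentage. One last illustrative pool map (name and value are placeholders):

```hcl
node_pools = [
  {
    name             = "bg-percent-pool"
    strategy         = "BLUE_GREEN"
    batch_percentage = 0.25 # drain 25% of the blue pool per batch; leave batch_node_count unset
  },
]
```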