diff --git a/README.md b/README.md
index 2212da8113..7e51d76f51 100644
--- a/README.md
+++ b/README.md
@@ -274,11 +274,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_count | The amount of local SSD disks that will be attached to each cluster node and may be used as a `hostpath` volume or a `local` PersistentVolume. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required |
 | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional |
diff --git a/autogen/main/README.md b/autogen/main/README.md
index 40182f7c34..fd35eb0395 100644
--- a/autogen/main/README.md
+++ b/autogen/main/README.md
@@ -210,11 +210,13 @@ The node_pools variable takes the following parameters:
 {% endif %}
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 {% if beta_cluster %}
 | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |
diff --git a/autogen/main/cluster.tf.tmpl b/autogen/main/cluster.tf.tmpl
index 5d2b3cc9ff..3c89358ec9 100644
--- a/autogen/main/cluster.tf.tmpl
+++ b/autogen/main/cluster.tf.tmpl
@@ -652,8 +652,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/cluster.tf b/cluster.tf
index 49520613ee..42754b0e65 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -391,8 +391,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -563,8 +563,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/modules/beta-private-cluster-update-variant/README.md b/modules/beta-private-cluster-update-variant/README.md
index d084f718f8..3d4460ff1e 100644
--- a/modules/beta-private-cluster-update-variant/README.md
+++ b/modules/beta-private-cluster-update-variant/README.md
@@ -339,11 +339,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_ephemeral_count | The amount of local SSD disks that will be attached to each cluster node and assigned as scratch space as an `emptyDir` volume. If unspecified, ephemeral storage is backed by the cluster node boot disk. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |
 | pod_range | The name of the secondary range for pod IPs. | | Optional |
diff --git a/modules/beta-private-cluster-update-variant/cluster.tf b/modules/beta-private-cluster-update-variant/cluster.tf
index 5eb7470a18..aad85bcdd2 100644
--- a/modules/beta-private-cluster-update-variant/cluster.tf
+++ b/modules/beta-private-cluster-update-variant/cluster.tf
@@ -566,8 +566,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -778,8 +778,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/modules/beta-private-cluster/README.md b/modules/beta-private-cluster/README.md
index dde80c4623..187319a38f 100644
--- a/modules/beta-private-cluster/README.md
+++ b/modules/beta-private-cluster/README.md
@@ -317,11 +317,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_ephemeral_count | The amount of local SSD disks that will be attached to each cluster node and assigned as scratch space as an `emptyDir` volume. If unspecified, ephemeral storage is backed by the cluster node boot disk. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |
 | pod_range | The name of the secondary range for pod IPs. | | Optional |
diff --git a/modules/beta-private-cluster/cluster.tf b/modules/beta-private-cluster/cluster.tf
index bd72dc97c9..50cd158ded 100644
--- a/modules/beta-private-cluster/cluster.tf
+++ b/modules/beta-private-cluster/cluster.tf
@@ -472,8 +472,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -683,8 +683,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/modules/beta-public-cluster-update-variant/README.md b/modules/beta-public-cluster-update-variant/README.md
index b366957d8f..8b027d13f6 100644
--- a/modules/beta-public-cluster-update-variant/README.md
+++ b/modules/beta-public-cluster-update-variant/README.md
@@ -326,11 +326,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_ephemeral_count | The amount of local SSD disks that will be attached to each cluster node and assigned as scratch space as an `emptyDir` volume. If unspecified, ephemeral storage is backed by the cluster node boot disk. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |
 | pod_range | The name of the secondary range for pod IPs. | | Optional |
diff --git a/modules/beta-public-cluster-update-variant/cluster.tf b/modules/beta-public-cluster-update-variant/cluster.tf
index 6b348b6111..acf9d3b29b 100644
--- a/modules/beta-public-cluster-update-variant/cluster.tf
+++ b/modules/beta-public-cluster-update-variant/cluster.tf
@@ -547,8 +547,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -758,8 +758,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/modules/beta-public-cluster/README.md b/modules/beta-public-cluster/README.md
index c018d01fd6..74a82dfa1c 100644
--- a/modules/beta-public-cluster/README.md
+++ b/modules/beta-public-cluster/README.md
@@ -304,11 +304,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_ephemeral_count | The amount of local SSD disks that will be attached to each cluster node and assigned as scratch space as an `emptyDir` volume. If unspecified, ephemeral storage is backed by the cluster node boot disk. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | placement_policy | Placement type to set for nodes in a node pool. Can be set as [COMPACT](https://cloud.google.com/kubernetes-engine/docs/how-to/compact-placement#overview) if desired | Optional |
 | pod_range | The name of the secondary range for pod IPs. | | Optional |
diff --git a/modules/beta-public-cluster/cluster.tf b/modules/beta-public-cluster/cluster.tf
index d21864f65b..623909ad09 100644
--- a/modules/beta-public-cluster/cluster.tf
+++ b/modules/beta-public-cluster/cluster.tf
@@ -453,8 +453,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -663,8 +663,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/modules/private-cluster-update-variant/README.md b/modules/private-cluster-update-variant/README.md
index 8d952ab9c1..f3519d5cee 100644
--- a/modules/private-cluster-update-variant/README.md
+++ b/modules/private-cluster-update-variant/README.md
@@ -309,11 +309,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_count | The amount of local SSD disks that will be attached to each cluster node and may be used as a `hostpath` volume or a `local` PersistentVolume. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required |
 | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional |
diff --git a/modules/private-cluster-update-variant/cluster.tf b/modules/private-cluster-update-variant/cluster.tf
index eafefad05f..103dc241c6 100644
--- a/modules/private-cluster-update-variant/cluster.tf
+++ b/modules/private-cluster-update-variant/cluster.tf
@@ -504,8 +504,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -677,8 +677,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
diff --git a/modules/private-cluster/README.md b/modules/private-cluster/README.md
index e82ae76c2b..94b9c58d98 100644
--- a/modules/private-cluster/README.md
+++ b/modules/private-cluster/README.md
@@ -287,11 +287,13 @@ The node_pools variable takes the following parameters:
 | local_ssd_count | The amount of local SSD disks that will be attached to each cluster node and may be used as a `hostpath` volume or a `local` PersistentVolume. | 0 | Optional |
 | machine_type | The name of a Google Compute Engine machine type | e2-medium | Optional |
 | min_cpu_platform | Minimum CPU platform to be used by the nodes in the pool. The nodes may be scheduled on the specified or newer CPU platform. | " " | Optional |
-| max_count | Maximum number of nodes in the NodePool. Must be >= min_count | 100 | Optional |
+| max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
+| total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. | 1 | Optional |
 | max_unavailable | The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. Can be set to 0 or greater. | 0 | Optional |
-| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true | 1 | Optional |
+| min_count | Minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with total limits. | 1 | Optional |
+| total_min_count | Total minimum number of nodes in the NodePool. Must be >=0 and <= max_count. Should be used when autoscaling is true. Cannot be used with per zone limits. | null | Optional |
 | name | The name of the node pool | | Required |
 | node_count | The number of nodes in the nodepool when autoscaling is false. Otherwise defaults to 1. Only valid for non-autoscaling clusters | | Required |
 | node_locations | The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. Defaults to cluster level node locations if nothing is specified | " " | Optional |
diff --git a/modules/private-cluster/cluster.tf b/modules/private-cluster/cluster.tf
index 9711d27d98..db33e44a91 100644
--- a/modules/private-cluster/cluster.tf
+++ b/modules/private-cluster/cluster.tf
@@ -410,8 +410,8 @@ resource "google_container_node_pool" "pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
@@ -582,8 +582,8 @@ resource "google_container_node_pool" "windows_pools" {
   dynamic "autoscaling" {
     for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
     content {
-      min_node_count       = lookup(autoscaling.value, "min_count", 1)
-      max_node_count       = lookup(autoscaling.value, "max_count", 100)
+      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
+      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
       location_policy      = lookup(autoscaling.value, "location_policy", null)
       total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
       total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
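For context, the intent of the change is that a node pool sets either the per-zone `min_count`/`max_count` or the new cluster-wide `total_min_count`/`total_max_count`, never both; when a total limit key is present the module passes `null` for the corresponding per-zone limit and forwards the totals to `total_min_node_count`/`total_max_node_count` on `google_container_node_pool`. The following is a minimal, illustrative sketch of a caller using the new keys; the pool name, counts, module version constraint, and variable references are assumptions for the example and are not part of this diff.

```hcl
# Illustrative only: a node pool that opts into total (cluster-wide) autoscaling
# limits instead of the per-zone min_count/max_count. Values and the version
# constraint are hypothetical; required cluster inputs are passed via variables.
module "gke" {
  source  = "terraform-google-modules/kubernetes-engine/google"
  version = "~> 24.0" # hypothetical release that includes this change

  project_id        = var.project_id
  name              = "example-cluster"
  region            = "us-central1"
  network           = var.network
  subnetwork        = var.subnetwork
  ip_range_pods     = var.ip_range_pods
  ip_range_services = var.ip_range_services

  node_pools = [
    {
      name            = "total-limits-pool" # illustrative name
      machine_type    = "e2-medium"
      autoscaling     = true
      location_policy = "ANY"
      total_min_count = 1  # cluster-wide floor; cannot be combined with min_count
      total_max_count = 10 # cluster-wide ceiling; cannot be combined with max_count
    },
  ]
}
```

Because the wrapper keys off the presence of `total_min_count`/`total_max_count` (via `contains(keys(...), ...)`), omitting them preserves today's behavior: the per-zone defaults of 1 and 100 still apply and the total limits are sent as `null`.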