fix: use dynamic block for accelerators, updates for CI #1428

Merged (9 commits) on Oct 19, 2022
13 changes: 5 additions & 8 deletions autogen/main/cluster.tf.tmpl
@@ -722,17 +722,14 @@ resource "google_container_node_pool" "windows_pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
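Note on the change above (and its ten siblings below): `guest_accelerator` was previously populated with a nested `for` expression that emitted either a one-element list or an empty list. The `dynamic` block expresses the same conditional directly: iterate over the placeholder list `[1]` when an accelerator count is set, over `[]` otherwise. Here is a minimal standalone sketch of the pattern; the variable names and pool arguments are illustrative stand-ins for the module's `lookup(each.value, ...)` calls, not code from this PR:

```hcl
# Minimal sketch, not part of this PR: var.accelerator_type and
# var.accelerator_count stand in for the module's lookup(each.value, ...) calls.
variable "accelerator_type" {
  type    = string
  default = "nvidia-tesla-t4"
}

variable "accelerator_count" {
  type    = number
  default = 0
}

resource "google_container_node_pool" "example" {
  name    = "accel-pool"
  cluster = "my-cluster" # hypothetical pre-existing cluster

  node_config {
    machine_type = "n1-standard-4"

    # for_each over a placeholder list: [1] renders the block exactly once
    # when a count is requested; [] omits it entirely.
    dynamic "guest_accelerator" {
      for_each = var.accelerator_count > 0 ? [1] : []
      content {
        type  = var.accelerator_type
        count = var.accelerator_count
      }
    }
  }
}
```

With the default `accelerator_count = 0`, the rendered resource has no `guest_accelerator` block at all.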
31 changes: 16 additions & 15 deletions build/int.cloudbuild.yaml
@@ -131,21 +131,22 @@ steps:
- verify simple-regional-with-networking-local
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-networking-local']
-  - id: converge simple-zonal-local
-    waitFor:
-      - create all
-    name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
-    args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local']
-  - id: verify simple-zonal-local
-    waitFor:
-      - converge simple-zonal-local
-    name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
-    args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local']
-  - id: destroy simple-zonal-local
-    waitFor:
-      - verify simple-zonal-local
-    name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
-    args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local']
+  # TODO(bharathkkb): https://github.com/terraform-google-modules/terraform-google-kubernetes-engine/issues/1431
+  # - id: converge simple-zonal-local
+  #   waitFor:
+  #     - create all
+  #   name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  #   args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-zonal-local']
+  # - id: verify simple-zonal-local
+  #   waitFor:
+  #     - converge simple-zonal-local
+  #   name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  #   args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-zonal-local']
+  # - id: destroy simple-zonal-local
+  #   waitFor:
+  #     - verify simple-zonal-local
+  #   name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  #   args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-zonal-local']
- id: converge simple-zonal-private-local
waitFor:
- create all
22 changes: 8 additions & 14 deletions cluster.tf
@@ -407,17 +407,14 @@ resource "google_container_node_pool" "pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
@@ -560,17 +557,14 @@ resource "google_container_node_pool" "windows_pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
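Consumer-facing behavior is unchanged: the same `accelerator_*` keys in `node_pools` feed the `lookup(each.value, ...)` calls inside the dynamic block. A usage sketch follows; the project, network, and secondary-range names are placeholders, and other required inputs match the module's documented interface:

```hcl
# Hypothetical consumer configuration. Only pools that set accelerator_count
# get a guest_accelerator block rendered in their node_config.
module "gke" {
  source            = "terraform-google-modules/kubernetes-engine/google"
  project_id        = "my-project"
  name              = "accel-cluster"
  region            = "us-central1"
  network           = "my-vpc"
  subnetwork        = "my-subnet"
  ip_range_pods     = "pods-range"
  ip_range_services = "services-range"

  node_pools = [
    {
      name              = "gpu-pool"
      machine_type      = "n1-standard-4"
      accelerator_type  = "nvidia-tesla-t4"
      accelerator_count = 1
    },
    {
      # No accelerator keys: the dynamic block renders nothing for this pool.
      name = "default-node-pool"
    },
  ]
}
```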
18 changes: 0 additions & 18 deletions examples/node_pool/data/shutdown-script.sh

This file was deleted.

4 changes: 2 additions & 2 deletions examples/node_pool/main.tf
@@ -37,7 +37,7 @@ module "gke" {
ip_range_pods = var.ip_range_pods
ip_range_services = var.ip_range_services
create_service_account = false
-  remove_default_node_pool          = true
+  remove_default_node_pool          = false
disable_legacy_metadata_endpoints = false
cluster_autoscaling = var.cluster_autoscaling

@@ -81,7 +81,7 @@ module "gke" {

node_pools_metadata = {
pool-01 = {
-      shutdown-script = file("${path.module}/data/shutdown-script.sh")
+      shutdown-script = "kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\""
}
}

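With `data/shutdown-script.sh` deleted, the drain command is now an inline string. If it needs to grow, a heredoc is an equivalent way to keep the metadata value readable; this is a sketch, not part of the PR:

```hcl
# Equivalent formulation using a heredoc instead of the inline string;
# the rendered metadata value is effectively the same drain command.
node_pools_metadata = {
  pool-01 = {
    shutdown-script = <<-EOT
      kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain \
        --force=true --ignore-daemonsets=true --delete-local-data "$HOSTNAME"
    EOT
  }
}
```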
3 changes: 0 additions & 3 deletions examples/safer_cluster/main.tf
@@ -73,9 +73,6 @@ module "gke" {
},
]

-  istio    = true
-  cloudrun = true
-
notification_config_topic = google_pubsub_topic.updates.id
}

2 changes: 0 additions & 2 deletions examples/simple_regional_beta/README.md
@@ -7,7 +7,6 @@ This example illustrates how to create a simple cluster with beta features.

| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| cloudrun | Boolean to enable / disable CloudRun | `bool` | `true` | no |
| cluster\_name\_suffix | A suffix to append to the default cluster name | `string` | `""` | no |
| compute\_engine\_service\_account | Service account to associate to the nodes in the cluster | `any` | n/a | yes |
| database\_encryption | Application-layer Secrets Encryption settings. The object format is {state = string, key\_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key\_name is the name of a CloudKMS key. | `list(object({ state = string, key_name = string }))` | <pre>[<br> {<br> "key_name": "",<br> "state": "DECRYPTED"<br> }<br>]</pre> | no |
@@ -18,7 +17,6 @@ This example illustrates how to create a simple cluster with beta features.
| gce\_pd\_csi\_driver | (Beta) Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. | `bool` | `false` | no |
| ip\_range\_pods | The secondary ip range to use for pods | `any` | n/a | yes |
| ip\_range\_services | The secondary ip range to use for services | `any` | n/a | yes |
-| istio | Boolean to enable / disable Istio | `bool` | `true` | no |
| network | The VPC network to host the cluster in | `any` | n/a | yes |
| node\_pools | List of maps containing node pools | `list(map(string))` | <pre>[<br> {<br> "name": "default-node-pool"<br> }<br>]</pre> | no |
| project\_id | The project ID to host the cluster in | `any` | n/a | yes |
2 changes: 0 additions & 2 deletions examples/simple_regional_beta/main.tf
@@ -39,8 +39,6 @@ module "gke" {
ip_range_services = var.ip_range_services
create_service_account = var.compute_engine_service_account == "create"
service_account = var.compute_engine_service_account
-  istio    = var.istio
-  cloudrun = var.cloudrun
dns_cache = var.dns_cache
gce_pd_csi_driver = var.gce_pd_csi_driver
sandbox_enabled = var.sandbox_enabled
10 changes: 0 additions & 10 deletions examples/simple_regional_beta/variables.tf
@@ -47,16 +47,6 @@ variable "compute_engine_service_account" {
description = "Service account to associate to the nodes in the cluster"
}

variable "istio" {
description = "Boolean to enable / disable Istio"
default = true
}

variable "cloudrun" {
description = "Boolean to enable / disable CloudRun"
default = true
}

variable "dns_cache" {
type = bool
description = "(Beta) The status of the NodeLocal DNSCache addon."
2 changes: 0 additions & 2 deletions examples/simple_regional_private_beta/main.tf
@@ -56,8 +56,6 @@ module "gke" {

enable_confidential_nodes = true

-  istio    = var.istio
-  cloudrun = var.cloudrun
dns_cache = var.dns_cache
gce_pd_csi_driver = var.gce_pd_csi_driver
}
10 changes: 0 additions & 10 deletions examples/simple_regional_private_beta/variables.tf
@@ -47,16 +47,6 @@ variable "compute_engine_service_account" {
description = "Service account to associate to the nodes in the cluster"
}

variable "istio" {
description = "Boolean to enable / disable Istio"
default = true
}

variable "cloudrun" {
description = "Boolean to enable / disable CloudRun"
default = true
}

variable "dns_cache" {
description = "Boolean to enable / disable NodeLocal DNSCache "
default = false
22 changes: 8 additions & 14 deletions modules/beta-private-cluster-update-variant/cluster.tf
@@ -638,17 +638,14 @@ resource "google_container_node_pool" "pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
@@ -846,17 +843,14 @@ resource "google_container_node_pool" "windows_pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
22 changes: 8 additions & 14 deletions modules/beta-private-cluster/cluster.tf
@@ -547,17 +547,14 @@ resource "google_container_node_pool" "pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
@@ -754,17 +751,14 @@ resource "google_container_node_pool" "windows_pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
22 changes: 8 additions & 14 deletions modules/beta-public-cluster-update-variant/cluster.tf
@@ -619,17 +619,14 @@ resource "google_container_node_pool" "pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
@@ -827,17 +824,14 @@ resource "google_container_node_pool" "windows_pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
22 changes: 8 additions & 14 deletions modules/beta-public-cluster/cluster.tf
@@ -528,17 +528,14 @@ resource "google_container_node_pool" "pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config
@@ -735,17 +732,14 @@ resource "google_container_node_pool" "windows_pools" {
local.node_pools_oauth_scopes[each.value["name"]],
)

-    guest_accelerator = [
-      for guest_accelerator in lookup(each.value, "accelerator_count", 0) > 0 ? [{
-        type               = lookup(each.value, "accelerator_type", "")
-        count              = lookup(each.value, "accelerator_count", 0)
-        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
-      }] : [] : {
-        type               = guest_accelerator["type"]
-        count              = guest_accelerator["count"]
-        gpu_partition_size = guest_accelerator["gpu_partition_size"]
-      }
-    ]
+    dynamic "guest_accelerator" {
+      for_each = lookup(each.value, "accelerator_count", 0) > 0 ? [1] : []
+      content {
+        type               = lookup(each.value, "accelerator_type", "")
+        count              = lookup(each.value, "accelerator_count", 0)
+        gpu_partition_size = lookup(each.value, "gpu_partition_size", null)
+      }
+    }

dynamic "workload_metadata_config" {
for_each = local.cluster_node_metadata_config