Breaking change: Rework taint model in GKE #15959

Merged
6 changes: 6 additions & 0 deletions .changelog/9011.txt
@@ -0,0 +1,6 @@
```release-note:breaking-change
container: reworked the `taint` field in `google_container_cluster` and `google_container_node_pool` to only manage a subset of taint keys based on those already in state. Most existing resources are unaffected, unless they use `sandbox_config`; see the upgrade guide for details.
```
```release-note:enhancement
container: added the `effective_taints` attribute to `google_container_cluster` and `google_container_node_pool`, outputting all known taint values
```
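
For orientation, a minimal Terraform sketch of the reworked model follows (illustrative only, not part of this PR's diff; the resource names and taint values are placeholders). Only the taint keys declared in configuration are tracked for drift, while every taint present on the nodes, including those GKE adds on its own, is exposed through the read-only `effective_taints` attribute.

```hcl
resource "google_container_node_pool" "example" {
  name    = "example-pool"
  cluster = google_container_cluster.primary.id

  node_config {
    machine_type = "e2-medium"

    # Only the "dedicated" key below is Terraform-managed. Taints that GKE adds
    # itself (e.g. when GKE Sandbox or GPUs are enabled) no longer produce a diff.
    taint {
      key    = "dedicated"
      value  = "ci-runners"
      effect = "NO_SCHEDULE"
    }
  }
}

# Read-only view of every taint on the nodes, regardless of source.
output "all_node_taints" {
  value = google_container_node_pool.example.node_config[0].effective_taints
}
```

Note that `taint` remains `ForceNew` in the schema below, so changing a managed taint key still recreates the underlying resource.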
81 changes: 70 additions & 11 deletions google/services/container/node_config.go
@@ -380,14 +380,9 @@ func schemaNodeConfig() *schema.Schema {
},

"taint": {
Type: schema.TypeList,
Optional: true,
// Computed=true because GKE Sandbox will automatically add taints to nodes that can/cannot run sandboxed pods.
Computed: true,
ForceNew: true,
// Legacy config mode allows explicitly defining an empty taint.
// See https://www.terraform.io/docs/configuration/attr-as-blocks.html
ConfigMode: schema.SchemaConfigModeAttr,
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Description: `List of Kubernetes taints to be applied to each node.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -414,6 +409,31 @@
},
},

"effective_taints": {
Type: schema.TypeList,
Computed: true,
Description: `List of kubernetes taints applied to each node.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
Type: schema.TypeString,
Computed: true,
Description: `Key for taint.`,
},
"value": {
Type: schema.TypeString,
Computed: true,
Description: `Value for taint.`,
},
"effect": {
Type: schema.TypeString,
Computed: true,
Description: `Effect for taint.`,
},
},
},
},

"workload_metadata_config": {
Computed: true,
Type: schema.TypeList,
@@ -828,8 +848,10 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
Value: data["value"].(string),
Effect: data["effect"].(string),
}

nodeTaints = append(nodeTaints, taint)
}

nc.Taints = nodeTaints
}

@@ -991,13 +1013,24 @@ func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]int
return result
}

func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
// v == old state of `node_config`
func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]interface{} {
config := make([]map[string]interface{}, 0, 1)

if c == nil {
return config
}

// default to no prior taint state if there are any issues
oldTaints := []interface{}{}
oldNodeConfigSchemaContainer := v.([]interface{})
if len(oldNodeConfigSchemaContainer) != 0 {
oldNodeConfigSchema := oldNodeConfigSchemaContainer[0].(map[string]interface{})
if vt, ok := oldNodeConfigSchema["taint"]; ok && len(vt.([]interface{})) > 0 {
oldTaints = vt.([]interface{})
}
}

config = append(config, map[string]interface{}{
"machine_type": c.MachineType,
"disk_size_gb": c.DiskSizeGb,
@@ -1020,7 +1053,8 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
"spot": c.Spot,
"min_cpu_platform": c.MinCpuPlatform,
"shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig),
"taint": flattenTaints(c.Taints),
"taint": flattenTaints(c.Taints, oldTaints),
"effective_taints": flattenEffectiveTaints(c.Taints),
"workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig),
"confidential_nodes": flattenConfidentialNodes(c.ConfidentialNodes),
"boot_disk_kms_key": c.BootDiskKmsKey,
@@ -1148,7 +1182,31 @@ func flattenGKEReservationAffinity(c *container.ReservationAffinity) []map[strin
return result
}

func flattenTaints(c []*container.NodeTaint) []map[string]interface{} {
// flattenTaints records the set of taints already present in state.
func flattenTaints(c []*container.NodeTaint, oldTaints []interface{}) []map[string]interface{} {
taintKeys := map[string]struct{}{}
for _, raw := range oldTaints {
data := raw.(map[string]interface{})
taintKey := data["key"].(string)
taintKeys[taintKey] = struct{}{}
}

result := []map[string]interface{}{}
for _, taint := range c {
if _, ok := taintKeys[taint.Key]; ok {
result = append(result, map[string]interface{}{
"key": taint.Key,
"value": taint.Value,
"effect": taint.Effect,
})
}
}

return result
}

// flattenEffectiveTaints records the complete set of taints returned from GKE.
func flattenEffectiveTaints(c []*container.NodeTaint) []map[string]interface{} {
result := []map[string]interface{}{}
for _, taint := range c {
result = append(result, map[string]interface{}{
@@ -1157,6 +1215,7 @@ func flattenTaints(c []*container.NodeTaint) []map[string]interface{} {
"effect": taint.Effect,
})
}

return result
}

2 changes: 1 addition & 1 deletion google/services/container/resource_container_cluster.go
@@ -2421,7 +2421,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
return fmt.Errorf("Error setting default_max_pods_per_node: %s", err)
}
}
if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig)); err != nil {
if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig, d.Get("node_config"))); err != nil {
return err
}
if err := d.Set("project", project); err != nil {
14 changes: 8 additions & 6 deletions google/services/container/resource_container_cluster_test.go
@@ -983,17 +983,19 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
Config: testAccContainerCluster_withNodeConfig(clusterName),
},
{
ResourceName: "google_container_cluster.with_node_config",
ImportState: true,
ImportStateVerify: true,
ResourceName: "google_container_cluster.with_node_config",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"node_config.0.taint"},
},
{
Config: testAccContainerCluster_withNodeConfigUpdate(clusterName),
},
{
ResourceName: "google_container_cluster.with_node_config",
ImportState: true,
ImportStateVerify: true,
ResourceName: "google_container_cluster.with_node_config",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"node_config.0.taint"},
},
},
})
2 changes: 1 addition & 1 deletion google/services/container/resource_container_node_pool.go
@@ -1055,7 +1055,7 @@ func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *c
"initial_node_count": np.InitialNodeCount,
"node_locations": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(np.Locations)),
"node_count": nodeCount,
"node_config": flattenNodeConfig(np.Config),
"node_config": flattenNodeConfig(np.Config, d.Get(prefix+"node_config")),
"instance_group_urls": igmUrls,
"managed_instance_group_urls": managedIgmUrls,
"version": np.Version,
google/services/container/resource_container_node_pool_test.go
@@ -216,7 +216,7 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#"},
ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"},
},
{
Config: testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool),
@@ -227,7 +227,7 @@
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#"},
ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"},
},
},
})
17 changes: 9 additions & 8 deletions website/docs/r/container_cluster.html.markdown
@@ -888,14 +888,13 @@ gvnic {
* `tags` - (Optional) The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls.

* `taint` - (Optional) A list of [Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
to apply to nodes. GKE's API can only set this field on cluster creation.
However, GKE will add taints to your nodes if you enable certain features such
as GPUs. If this field is set, any diffs on this field will cause Terraform to
recreate the underlying resource. Taint values can be updated safely in
Kubernetes (eg. through `kubectl`), and it's recommended that you do not use
this field to manage taints. If you do, `lifecycle.ignore_changes` is
recommended. Structure is [documented below](#nested_taint).
* `taint` - (Optional) A list of
[Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
to apply to nodes. This field reports drift only for taint keys that are already
managed by Terraform; use `effective_taints` to view the full list of taints
applied to the node pool from all sources, including those added by GKE itself.
Importing this resource records no taints as Terraform-managed, so any taints in
the configuration will show as a diff after import. Structure is
[documented below](#nested_taint).

* `workload_metadata_config` - (Optional) Metadata configuration to expose to workloads on the node pool.
Structure is [documented below](#nested_workload_metadata_config).
@@ -1310,6 +1309,8 @@ exported:

* `cluster_autoscaling.0.auto_provisioning_defaults.0.management.0.upgrade_options` - Specifies the [Auto Upgrade knobs](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/NodeManagement#AutoUpgradeOptions) for the node pool.

* `node_config.0.effective_taints` - List of Kubernetes taints applied to each node. Structure is [documented above](#nested_taint).
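
Given the note above that importing records no Terraform-managed taint keys, one possible way to quiet the resulting diff on an imported resource is to ignore the field via `lifecycle` (an illustrative workaround with placeholder names, not a recommendation made by this PR):

```hcl
resource "google_container_node_pool" "imported" {
  name    = "imported-pool"
  cluster = google_container_cluster.primary.id

  node_config {
    taint {
      key    = "dedicated"
      value  = "ci-runners"
      effect = "NO_SCHEDULE"
    }
  }

  lifecycle {
    # Suppress the post-import diff on taints that already exist on the nodes;
    # remove this once the desired taints are applied and recorded in state.
    ignore_changes = [node_config[0].taint]
  }
}
```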

## Timeouts

This resource provides the following