Skip to content

Commit

Permalink
Add Resource Manager Tags support to 'google_container_cluster' (#9531)…
Browse files Browse the repository at this point in the history
… (#17346)

* resourceManagerTags added to Cluster Node Config schema

* update beta tag

* add cluster and node proto tests

* add expand and flatten proto

* removed beta tag

* added to documentation

* added resource manager tags to auto pilot

* migrating resourceManagerTags tests

* migrating node_pools test

* migrating additional tests

* minor fixes

* fixing tests

* add in-place update support

* fixed tests

* fixed annotations

* validated clusters and node pools tests. Isolated node pool auto config

* isolated resource manager tags from docs

* fixed permission issue

* fixed spaces

* fixed non determinism on tag keys

* removed auto_pilot rmts

* fixed time_sleep

* add depends_on to IAM policies

[upstream:343ff46df5008cc457392c3baafd0acc6dc97633]

Signed-off-by: Modular Magician <magic-modules@google.com>
  • Loading branch information
modular-magician authored Feb 21, 2024
1 parent a381c88 commit 71a9b7a
Show file tree
Hide file tree
Showing 8 changed files with 615 additions and 7 deletions.
6 changes: 6 additions & 0 deletions .changelog/9531.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
```release-note:enhancement
container: added `node_config.resource_manager_tags` field to `google_container_cluster` resource
```
```release-note:enhancement
container: added `node_config.resource_manager_tags` field to `google_container_node_pool` resource
```
2 changes: 1 addition & 1 deletion google/services/compute/resource_compute_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -633,7 +633,7 @@ func ResourceComputeInstance() *schema.Resource {
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Description: `A set of key/value label pairs assigned to the instance.
**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field 'effective_labels' for all of the labels present on the resource.`,
},
Expand Down
36 changes: 36 additions & 0 deletions google/services/container/node_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -625,6 +625,11 @@ func schemaNodeConfig() *schema.Schema {
},
},
},
"resource_manager_tags": {
Type: schema.TypeMap,
Optional: true,
Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`,
},
},
},
}
Expand Down Expand Up @@ -810,6 +815,10 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
nc.ResourceLabels = m
}

if v, ok := nodeConfig["resource_manager_tags"]; ok && len(v.(map[string]interface{})) > 0 {
nc.ResourceManagerTags = expandResourceManagerTags(v)
}

if v, ok := nodeConfig["tags"]; ok {
tagsList := v.([]interface{})
tags := []string{}
Expand Down Expand Up @@ -894,6 +903,19 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
return nc
}

// expandResourceManagerTags converts the raw schema value (a map of
// "tagKeys/{id}" -> "tagValues/{id}" strings) into the API's
// ResourceManagerTags message. "Tags" is listed in ForceSendFields so the
// field is serialized even when the map is empty, allowing an empty value
// to clear tags server-side.
func expandResourceManagerTags(v interface{}) *container.ResourceManagerTags {
	rmt := &container.ResourceManagerTags{
		Tags:            map[string]string{},
		ForceSendFields: []string{"Tags"},
	}

	if v != nil {
		rmt.Tags = tpgresource.ConvertStringMap(v.(map[string]interface{}))
	}

	return rmt
}

func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig {
if v == nil {
return nil
Expand Down Expand Up @@ -1090,6 +1112,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte
"advanced_machine_features": flattenAdvancedMachineFeaturesConfig(c.AdvancedMachineFeatures),
"sole_tenant_config": flattenSoleTenantConfig(c.SoleTenantConfig),
"fast_socket": flattenFastSocket(c.FastSocket),
"resource_manager_tags": flattenResourceManagerTags(c.ResourceManagerTags),
})

if len(c.OauthScopes) > 0 {
Expand All @@ -1099,6 +1122,19 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]inte
return config
}

// flattenResourceManagerTags converts an API ResourceManagerTags message back
// into the map[string]interface{} shape stored in Terraform state. A nil
// input yields an empty (non-nil) map.
func flattenResourceManagerTags(c *container.ResourceManagerTags) map[string]interface{} {
	result := map[string]interface{}{}

	if c == nil {
		return result
	}

	for key, value := range c.Tags {
		result[key] = value
	}

	return result
}

func flattenAdvancedMachineFeaturesConfig(c *container.AdvancedMachineFeatures) []map[string]interface{} {
result := []map[string]interface{}{}
if c != nil {
Expand Down
2 changes: 2 additions & 0 deletions google/services/container/resource_container_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,7 @@ var (
forceNewClusterNodeConfigFields = []string{
"labels",
"workload_metadata_config",
"resource_manager_tags",
}

suppressDiffForAutopilot = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool {
Expand Down Expand Up @@ -4901,6 +4902,7 @@ func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoCon
if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 {
npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v)
}

return npac
}

Expand Down
128 changes: 124 additions & 4 deletions google/services/container/resource_container_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,43 @@ func TestAccContainerCluster_basic(t *testing.T) {
})
}

// TestAccContainerCluster_resourceManagerTags verifies that a cluster can be
// created with node_config.0.resource_manager_tags populated, and that the
// resulting resource imports cleanly afterwards.
func TestAccContainerCluster_resourceManagerTags(t *testing.T) {
	t.Parallel()

	pid := envvar.GetTestProjectFromEnv()

	randomSuffix := acctest.RandString(t, 10)
	clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix)

	// Network and subnetwork are bootstrapped once and shared across test runs.
	networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
	subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		// The test config uses a time_sleep resource, which requires the
		// hashicorp/time provider.
		ExternalProviders: map[string]resource.ExternalProvider{
			"time": {},
		},
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccContainerCluster_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttrSet("google_container_cluster.primary", "self_link"),
					// The ".%" attribute is the map's element count; asserting it
					// is set confirms resource_manager_tags made it into state.
					resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_config.0.resource_manager_tags.%"),
				),
			},
			{
				ResourceName:      "google_container_cluster.primary",
				ImportStateId:     fmt.Sprintf("us-central1-a/%s", clusterName),
				ImportState:       true,
				ImportStateVerify: true,
				// These fields are not round-tripped through import.
				ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"},
			},
		},
	})
}

func TestAccContainerCluster_networkingModeRoutes(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -3805,11 +3842,11 @@ func testAccContainerCluster_withIncompatibleMasterVersionNodeVersion(name strin
resource "google_container_cluster" "gke_cluster" {
name = "%s"
location = "us-central1"
min_master_version = "1.10.9-gke.5"
node_version = "1.10.6-gke.11"
initial_node_count = 1
}
`, name)
}
Expand Down Expand Up @@ -5824,7 +5861,7 @@ resource "google_container_cluster" "with_autoprovisioning" {
min_master_version = data.google_container_engine_versions.central1a.latest_master_version
initial_node_count = 1
deletion_protection = false
network = "%s"
subnetwork = "%s"
Expand Down Expand Up @@ -8095,7 +8132,7 @@ resource "google_compute_resource_policy" "policy" {
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1-a"
node_pool {
name = "%s"
initial_node_count = 2
Expand Down Expand Up @@ -8196,3 +8233,86 @@ func testAccContainerCluster_additional_pod_ranges_config(name string, nameCount
}
`, name, name, name, aprc)
}

// testAccContainerCluster_resourceManagerTags returns an acceptance-test
// config that creates a resource manager tag key/value pair and a GKE cluster
// whose node_config binds that tag via resource_manager_tags.
//
// The GKE and cloudservices service agents need tagHoldAdmin/tagUser before
// cluster creation can attach tags; the 120s time_sleep (which the cluster
// depends_on) gives those IAM bindings time to propagate.
func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string {
	return fmt.Sprintf(`
data "google_project" "project" {
  project_id = "%[1]s"
}
resource "google_project_iam_binding" "tagHoldAdmin" {
  project = "%[1]s"
  role    = "roles/resourcemanager.tagHoldAdmin"
  members = [
    "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com",
  ]
}
resource "google_project_iam_binding" "tagUser" {
  project = "%[1]s"
  role    = "roles/resourcemanager.tagUser"
  members = [
    "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com",
    "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com",
  ]
  depends_on = [google_project_iam_binding.tagHoldAdmin]
}
resource "time_sleep" "wait_120_seconds" {
  create_duration = "120s"
  depends_on = [
    google_project_iam_binding.tagHoldAdmin,
    google_project_iam_binding.tagUser
  ]
}
resource "google_tags_tag_key" "key" {
  parent      = "projects/%[1]s"
  short_name  = "foobarbaz-%[2]s"
  description = "For foo/bar resources"
  purpose     = "GCE_FIREWALL"
  purpose_data = {
    network = "%[1]s/%[4]s"
  }
}
resource "google_tags_tag_value" "value" {
  parent      = "tagKeys/${google_tags_tag_key.key.name}"
  short_name  = "foo-%[2]s"
  description = "For foo resources"
}
data "google_container_engine_versions" "uscentral1a" {
  location = "us-central1-a"
}
resource "google_container_cluster" "primary" {
  name               = "%[3]s"
  location           = "us-central1-a"
  min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"]
  initial_node_count = 1
  node_config {
    machine_type = "n1-standard-1" // can't be e2 because of local-ssd
    disk_size_gb = 15
    resource_manager_tags = {
      "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}"
    }
  }
  deletion_protection = false
  network             = "%[4]s"
  subnetwork          = "%[5]s"
  timeouts {
    create = "30m"
    update = "40m"
  }
  depends_on = [time_sleep.wait_120_seconds]
}
`, projectID, randomSuffix, clusterName, networkName, subnetworkName)
}
42 changes: 42 additions & 0 deletions google/services/container/resource_container_node_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -1436,6 +1436,48 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
log.Printf("[INFO] Updated tags for node pool %s", name)
}

if d.HasChange(prefix + "node_config.0.resource_manager_tags") {
req := &container.UpdateNodePoolRequest{
Name: name,
}
if v, ok := d.GetOk(prefix + "node_config.0.resource_manager_tags"); ok {
req.ResourceManagerTags = expandResourceManagerTags(v)
}

// Sets resource manager tags to the empty list when the user removes a previously defined list of tags entirely,
// i.e. the node pool goes from having tags to no longer having any.
if req.ResourceManagerTags == nil {
tags := make(map[string]string)
rmTags := &container.ResourceManagerTags{
Tags: tags,
}
req.ResourceManagerTags = rmTags
}

updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
if config.UserProjectOverride {
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
}
op, err := clusterNodePoolsUpdateCall.Do()
if err != nil {
return err
}

// Wait until it's updated
return ContainerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool resource manager tags", userAgent,
timeout)
}

if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
return err
}
log.Printf("[INFO] Updated resource manager tags for node pool %s", name)
}

if d.HasChange(prefix + "node_config.0.resource_labels") {
req := &container.UpdateNodePoolRequest{
Name: name,
Expand Down
Loading

0 comments on commit 71a9b7a

Please sign in to comment.