diff --git a/mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb index ed58a77a3072..ba523c6279a0 100644 --- a/mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb @@ -265,7 +265,6 @@ var schemaNodePool = map[string]*schema.Schema{ Description: `The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way.`, }, -<% unless version == 'ga' -%> "network_config": { Type: schema.TypeList, Optional: true, @@ -280,10 +279,15 @@ var schemaNodePool = map[string]*schema.Schema{ ForceNew: true, Description: `Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.`, }, - + "enable_private_nodes": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether nodes have internal IP addresses only.`, + }, "pod_range": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, Description: `The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. 
If create_pod_range is false, uses an existing secondary range with this ID.`, }, @@ -298,7 +302,6 @@ var schemaNodePool = map[string]*schema.Schema{ }, }, }, -<% end -%> } @@ -772,9 +775,7 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, Config: expandNodeConfig(d.Get(prefix + "node_config")), Locations: locations, Version: d.Get(prefix + "version").(string), -<% unless version == 'ga' -%> NetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")), -<% end -%> } if v, ok := d.GetOk(prefix + "autoscaling"); ok { @@ -878,9 +879,7 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP "instance_group_urls": igmUrls, "managed_instance_group_urls": managedIgmUrls, "version": np.Version, -<% unless version == 'ga' -%> "network_config": flattenNodeNetworkConfig(np.NetworkConfig, d, prefix), -<% end -%> } if np.Autoscaling != nil { @@ -934,14 +933,14 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP return nodePool, nil } -<% unless version == 'ga' -%> func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.ResourceData, prefix string) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { result = append(result, map[string]interface{}{ - "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. Field is ForceNew + Required - "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, - "pod_range": c.PodRange, + "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. 
"updating GKE node pool network_config", userAgent,
log.Printf("[INFO] Updated network_config for node pool %s", name)
testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, flag string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.23" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } +} + +resource "google_container_node_pool" "with_enable_private_nodes" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = false + enable_private_nodes = %s + pod_range = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} +`, network, cluster, np, flag) +} <% unless version.nil? 
|| version == 'ga' -%> func TestAccContainerNodePool_withBootDiskKmsKey(t *testing.T) { @@ -2065,7 +2157,6 @@ resource "google_container_node_pool" "with_linux_node_config" { <% end -%> -<% unless version == 'ga' -%> func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { @@ -2154,7 +2245,6 @@ resource "google_container_node_pool" "with_auto_pod_cidr" { `, network, cluster, np, np) } -<% end -%> <% unless version.nil? || version == 'ga' -%> diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 3546c12814bf..dd8d626b5b41 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -255,9 +255,6 @@ region are guaranteed to support the same version. manages the default node pool, which isn't recommended to be used with Terraform. Structure is [documented below](#nested_node_config). -* `network_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Configuration for - [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr)) to the node pool. Structure is [documented below](#nested_network_config) - * `node_pool` - (Optional) List of node pools associated with this cluster. See [google_container_node_pool](container_node_pool.html) for schema. **Warning:** node pools defined inside a cluster can't be changed (or added/removed) after @@ -854,14 +851,6 @@ linux_node_config { * `node_group` - (Optional) Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes). 
-The `network_config` block supports: - -* `create_pod_range` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified. - -* `pod_ipv4_cidr_block` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use. - -* `pod_range` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID. - The `ephemeral_storage_config` block supports: * `local_ssd_count` (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. diff --git a/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown index e7b3b5d26ab0..d2e1d4831dd9 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown @@ -148,8 +148,9 @@ cluster. * `node_config` - (Optional) Parameters used in creating the node pool. See [google_container_cluster](container_cluster.html#nested_node_config) for schema. -* `network_config` - (Optional) The network configuration of the pool. See - [google_container_cluster](container_cluster.html) for schema. 
The network configuration of the pool, such as + configuration for [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr) to the node pool, or enabling private nodes. Structure is