Skip to content

Commit

Permalink
Add support for container cluster resource for container_node_pool.ne…
Browse files Browse the repository at this point in the history
…twork_config.enable_private_nodes

Signed-off-by: Francis Liu <liufrancis@google.com>
  • Loading branch information
Francis-Liu committed Nov 3, 2022
1 parent b171436 commit 13406e9
Show file tree
Hide file tree
Showing 4 changed files with 141 additions and 31 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,6 @@ var schemaNodePool = map[string]*schema.Schema{
Description: `The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way.`,
},

<% unless version == 'ga' -%>
"network_config": {
Type: schema.TypeList,
Optional: true,
Expand All @@ -280,10 +279,15 @@ var schemaNodePool = map[string]*schema.Schema{
ForceNew: true,
Description: `Whether to create a new range for pod IPs in this node pool. Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.`,
},

"enable_private_nodes": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
Description: `Whether nodes have internal IP addresses only.`,
},
"pod_range": {
Type: schema.TypeString,
Required: true,
Optional: true,
ForceNew: true,
Description: `The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.`,
},
Expand All @@ -298,7 +302,6 @@ var schemaNodePool = map[string]*schema.Schema{
},
},
},
<% end -%>

}

Expand Down Expand Up @@ -772,9 +775,7 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool,
Config: expandNodeConfig(d.Get(prefix + "node_config")),
Locations: locations,
Version: d.Get(prefix + "version").(string),
<% unless version == 'ga' -%>
NetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")),
<% end -%>
}

if v, ok := d.GetOk(prefix + "autoscaling"); ok {
Expand Down Expand Up @@ -878,9 +879,7 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP
"instance_group_urls": igmUrls,
"managed_instance_group_urls": managedIgmUrls,
"version": np.Version,
<% unless version == 'ga' -%>
"network_config": flattenNodeNetworkConfig(np.NetworkConfig, d, prefix),
<% end -%>
}

if np.Autoscaling != nil {
Expand Down Expand Up @@ -934,14 +933,14 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodeP
return nodePool, nil
}

<% unless version == 'ga' -%>
func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.ResourceData, prefix string) []map[string]interface{} {
result := []map[string]interface{}{}
if c != nil {
result = append(result, map[string]interface{}{
"create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. Field is ForceNew + Required
"pod_ipv4_cidr_block": c.PodIpv4CidrBlock,
"pod_range": c.PodRange,
"create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. Field is ForceNew + Required
"pod_ipv4_cidr_block": c.PodIpv4CidrBlock,
"pod_range": c.PodRange,
"enable_private_nodes": c.EnablePrivateNodes,
})
}
return result
Expand Down Expand Up @@ -970,12 +969,15 @@ func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig {
nnc.PodIpv4CidrBlock = v.(string)
}

if v, ok := networkNodeConfig["enable_private_nodes"]; ok {
nnc.EnablePrivateNodes = v.(bool)
nnc.ForceSendFields = []string{"EnablePrivateNodes"}
}

return nnc
}


<% end -%>

func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation, prefix string, timeout time.Duration) error {
config := meta.(*Config)
name := d.Get(prefix + "name").(string)
Expand Down Expand Up @@ -1393,6 +1395,40 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
log.Printf("[INFO] Updated upgrade settings in Node Pool %s", name)
}

if d.HasChange(prefix + "network_config") {
if d.HasChange(prefix + "network_config.0.enable_private_nodes") {
req := &container.UpdateNodePoolRequest{
NodePoolId: name,
NodeNetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")),
}
updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req)
if config.UserProjectOverride {
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
}
op, err := clusterNodePoolsUpdateCall.Do()

if err != nil {
return err
}

// Wait until it's updated
return containerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool workload_metadata_config", userAgent,
timeout)
}

// Call update serially.
if err := lockedCall(lockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name)
}
}

return nil
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,6 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) {
}
<% end -%>

<% unless version == 'ga' -%>
func TestAccContainerNodePool_withNetworkConfig(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -478,7 +477,84 @@ func TestAccContainerNodePool_withNetworkConfig(t *testing.T) {
},
})
}
<% end -%>


// TestAccContainerNodePool_withEnablePrivateNodesToggle verifies that
// network_config.0.enable_private_nodes on a node pool can be flipped in
// place: create the pool with private nodes enabled, import and verify state,
// then disable the flag and import/verify again.
func TestAccContainerNodePool_withEnablePrivateNodesToggle(t *testing.T) {
	t.Parallel()

	clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
	nodePoolName := fmt.Sprintf("tf-test-np-%s", randString(t, 10))
	networkName := fmt.Sprintf("tf-test-net-%s", randString(t, 10))

	// The same import/verify step follows each apply; min_master_version is
	// ignored because it is not returned by the API on import.
	importStep := resource.TestStep{
		ResourceName:            "google_container_node_pool.with_enable_private_nodes",
		ImportState:             true,
		ImportStateVerify:       true,
		ImportStateVerifyIgnore: []string{"min_master_version"},
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccContainerNodePool_withEnablePrivateNodesToggle(clusterName, nodePoolName, networkName, "true"),
			},
			importStep,
			{
				Config: testAccContainerNodePool_withEnablePrivateNodesToggle(clusterName, nodePoolName, networkName, "false"),
			},
			importStep,
		},
	})
}


func testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, flag string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
name = "%s"
auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "container_subnetwork" {
name = google_compute_network.container_network.name
network = google_compute_network.container_network.name
ip_cidr_range = "10.0.36.0/24"
region = "us-central1"
private_ip_google_access = true
}

resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1-a"
min_master_version = "1.23"
initial_node_count = 1

network = google_compute_network.container_network.name
subnetwork = google_compute_subnetwork.container_subnetwork.name
}

resource "google_container_node_pool" "with_enable_private_nodes" {
name = "%s"
location = "us-central1-a"
cluster = google_container_cluster.cluster.name
node_count = 1
network_config {
enable_private_nodes = %s
}
node_config {
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform",
]
}
}
`, network, cluster, np, flag)
}

<% unless version.nil? || version == 'ga' -%>
func TestAccContainerNodePool_withBootDiskKmsKey(t *testing.T) {
Expand Down Expand Up @@ -2065,7 +2141,6 @@ resource "google_container_node_pool" "with_linux_node_config" {
<% end -%>


<% unless version == 'ga' -%>
func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
Expand Down Expand Up @@ -2154,7 +2229,6 @@ resource "google_container_node_pool" "with_auto_pod_cidr" {

`, network, cluster, np, np)
}
<% end -%>


<% unless version.nil? || version == 'ga' -%>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -255,9 +255,6 @@ region are guaranteed to support the same version.
manages the default node pool, which isn't recommended to be used with
Terraform. Structure is [documented below](#nested_node_config).

* `network_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Configuration for
[Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr) to the node pool. Structure is [documented below](#nested_network_config).

* `node_pool` - (Optional) List of node pools associated with this cluster.
See [google_container_node_pool](container_node_pool.html) for schema.
**Warning:** node pools defined inside a cluster can't be changed (or added/removed) after
Expand Down Expand Up @@ -828,14 +825,6 @@ linux_node_config {

* `node_group` - (Optional) Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on [sole tenant nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes).

<a name="nested_network_config"></a>The `network_config` block supports:

* `create_pod_range` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified.

* `pod_ipv4_cidr_block` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

* `pod_range` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.

<a name="nested_ephemeral_storage_config"></a>The `ephemeral_storage_config` block supports:

* `local_ssd_count` (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,9 @@ cluster.
* `node_config` - (Optional) Parameters used in creating the node pool. See
[google_container_cluster](container_cluster.html#nested_node_config) for schema.

* `network_config` - (Optional) The network configuration of the pool. See
[google_container_cluster](container_cluster.html) for schema.
* `network_config` - (Optional) The network configuration of the pool, such as
configuration for [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr) to the node pool, or enabling private nodes. Structure is
[documented below](#nested_network_config).

* `node_count` - (Optional) The number of nodes per instance group. This field can be used to
update the number of nodes per instance group but should not be used alongside `autoscaling`.
Expand Down Expand Up @@ -199,6 +200,16 @@ cluster.

* `auto_upgrade` - (Optional) Whether the nodes will be automatically upgraded.

<a name="nested_network_config"></a>The `network_config` block supports:

* `create_pod_range` - (Optional) Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified.

* `enable_private_nodes` - (Optional) Whether nodes have internal IP addresses only.

* `pod_ipv4_cidr_block` - (Optional) The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.

* `pod_range` - (Optional) The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.

<a name="nested_upgrade_settings"></a>The `upgrade_settings` block supports:

* `max_surge` - (Required) The number of additional nodes that can be added to the node pool during
Expand Down

0 comments on commit 13406e9

Please sign in to comment.