Add terraform support for storage_pools on cluster/nodepool create, and nodepool update
amacaskill committed Aug 8, 2024
1 parent 11ca550 commit 94f7f22
Showing 5 changed files with 371 additions and 4 deletions.
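
For reference, a minimal usage sketch of the new attribute (resource names and the storage pool path are placeholders; the attribute name, pool path format, machine type, and disk type mirror the schema and tests added below):

resource "google_container_cluster" "example" {
  name                = "example-cluster"
  location            = "us-central1-a"
  initial_node_count  = 1
  deletion_protection = false

  node_config {
    machine_type = "c3-standard-4"
    disk_type    = "hyperdisk-balanced"
    # Node boot disks are provisioned out of this Hyperdisk storage pool.
    storage_pools = [
      "projects/my-project/zones/us-central1-a/storagePools/my-storage-pool",
    ]
  }
}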
19 changes: 19 additions & 0 deletions mmv1/third_party/terraform/services/container/node_config.go.erb
@@ -456,6 +456,14 @@ func schemaNodeConfig() *schema.Schema {
            Description: `The list of instance tags applied to all nodes.`,
        },

        "storage_pools": {
            Type:        schema.TypeList,
            ForceNew:    true,
            Optional:    true,
            Elem:        &schema.Schema{Type: schema.TypeString},
            Description: `The list of Storage Pools where boot disks are provisioned.`,
        },

        "shielded_instance_config": {
            Type:     schema.TypeList,
            Optional: true,
@@ -1011,6 +1019,16 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
        nc.Tags = tags
    }

    if v, ok := nodeConfig["storage_pools"]; ok {
        spList := v.([]interface{})
        storagePools := []string{}
        for _, v := range spList {
            if v != nil {
                storagePools = append(storagePools, v.(string))
            }
        }
        nc.StoragePools = storagePools
    }
    if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
        conf := v.([]interface{})[0].(map[string]interface{})
        nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{
@@ -1413,6 +1431,7 @@ func flattenNodeConfig(c *container.NodeConfig, v interface{}) []map[string]interface{} {
        "tags":                     c.Tags,
        "preemptible":              c.Preemptible,
        "secondary_boot_disks":     flattenSecondaryBootDisks(c.SecondaryBootDisks),
        "storage_pools":            c.StoragePools,
        "spot":                     c.Spot,
        "min_cpu_platform":         c.MinCpuPlatform,
        "shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig),
@@ -6,12 +6,16 @@ import (
    "fmt"
    "testing"
    "regexp"
    "net/http"
    "time"

    "github.com/hashicorp/terraform-plugin-testing/helper/resource"
    "github.com/hashicorp/terraform-plugin-testing/terraform"
    "github.com/hashicorp/terraform-provider-google/google/acctest"
    "github.com/hashicorp/terraform-provider-google/google/envvar"
    "github.com/hashicorp/terraform-provider-google/google/services/container"
    transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
    tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute"
)

func TestAccContainerCluster_basic(t *testing.T) {
@@ -11188,3 +11192,188 @@ resource "google_container_cluster" "primary" {
}
`, secretID, clusterName, networkName, subnetworkName)
}


func TestAccContainerCluster_storagePoolsWithNodePool(t *testing.T) {
    t.Parallel()

    cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
    np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
    networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
    subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
    pid := envvar.GetTestProjectFromEnv()
    location := envvar.GetTestZoneFromEnv()
    storagePoolName := "tf-test-storage-pool-node-pool"
    storagePoolResourceName := fmt.Sprintf("projects/%s/zones/%s/storagePools/%s", pid, location, storagePoolName)
    t.Cleanup(func() {
        cleanupTestingStoragePool(t, storagePoolName)
    })

    acctest.VcrTest(t, resource.TestCase{
        PreCheck:                 func() { acctest.AccTestPreCheck(t) },
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
        CheckDestroy:             testAccCheckContainerNodePoolDestroyProducer(t),
        Steps: []resource.TestStep{
            {
                PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName),
                Config:    testAccContainerCluster_storagePoolsWithNodePool(cluster, np, networkName, subnetworkName, storagePoolResourceName, location),
                Check: resource.ComposeTestCheckFunc(
                    resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_pool", "node_pool.0.node_config.0.storage_pools.0", storagePoolResourceName),
                ),
            },
            {
                ResourceName:            "google_container_cluster.storage_pools_with_node_pool",
                ImportState:             true,
                ImportStateVerify:       true,
                ImportStateVerifyIgnore: []string{"deletion_protection"},
            },
        },
    })
}

func testAccContainerCluster_storagePoolsWithNodePool(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
    return fmt.Sprintf(`
resource "google_container_cluster" "storage_pools_with_node_pool" {
  name                = "%[1]s"
  location            = "%[6]s"
  deletion_protection = false
  network             = "%[3]s"
  subnetwork          = "%[4]s"
  node_pool {
    name               = "%[2]s"
    initial_node_count = 1
    node_config {
      machine_type  = "c3-standard-4"
      image_type    = "COS_CONTAINERD"
      storage_pools = ["%[5]s"]
      disk_type     = "hyperdisk-balanced"
    }
  }
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}

func TestAccContainerCluster_storagePoolsWithNodeConfig(t *testing.T) {
    t.Parallel()

    cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10))
    np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10))
    networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster")
    subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName)
    pid := envvar.GetTestProjectFromEnv()
    location := envvar.GetTestZoneFromEnv()
    storagePoolName := "tf-test-storage-pool-node-config"
    storagePoolResourceName := fmt.Sprintf("projects/%s/zones/%s/storagePools/%s", pid, location, storagePoolName)
    t.Cleanup(func() {
        cleanupTestingStoragePool(t, storagePoolName)
    })

    acctest.VcrTest(t, resource.TestCase{
        PreCheck:                 func() { acctest.AccTestPreCheck(t) },
        ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
        CheckDestroy:             testAccCheckContainerNodePoolDestroyProducer(t),
        Steps: []resource.TestStep{
            {
                PreConfig: setupTestingStoragePool_HyperdiskBalanced(t, storagePoolName),
                Config:    testAccContainerCluster_storagePoolsWithNodeConfig(cluster, np, networkName, subnetworkName, storagePoolResourceName, location),
                Check: resource.ComposeTestCheckFunc(
                    resource.TestCheckResourceAttr("google_container_cluster.storage_pools_with_node_config", "node_config.0.storage_pools.0", storagePoolResourceName),
                ),
            },
            {
                ResourceName:            "google_container_cluster.storage_pools_with_node_config",
                ImportState:             true,
                ImportStateVerify:       true,
                ImportStateVerifyIgnore: []string{"deletion_protection"},
            },
        },
    })
}

func testAccContainerCluster_storagePoolsWithNodeConfig(cluster, np, networkName, subnetworkName, storagePoolResourceName, location string) string {
    return fmt.Sprintf(`
resource "google_container_cluster" "storage_pools_with_node_config" {
  name                = "%[1]s"
  location            = "%[6]s"
  initial_node_count  = 1
  deletion_protection = false
  network             = "%[3]s"
  subnetwork          = "%[4]s"
  node_config {
    machine_type  = "c3-standard-4"
    image_type    = "COS_CONTAINERD"
    storage_pools = ["%[5]s"]
    disk_type     = "hyperdisk-balanced"
  }
}
`, cluster, np, networkName, subnetworkName, storagePoolResourceName, location)
}

func setupTestingStoragePool_HyperdiskBalanced(t *testing.T, storagePoolName string) func() {
    return func() {
        config := acctest.GoogleProviderConfig(t)
        headers := make(http.Header)
        project := envvar.GetTestProjectFromEnv()
        zone := envvar.GetTestZoneFromEnv()
        url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, project, zone)
        storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", project, zone)
        defaultTimeout := 20 * time.Minute
        obj := make(map[string]interface{})
        obj["name"] = storagePoolName
        obj["poolProvisionedCapacityGb"] = 10240
        obj["poolProvisionedIops"] = 10000
        obj["poolProvisionedThroughput"] = 1024
        obj["storagePoolType"] = storagePoolTypeUrl
        obj["capacityProvisioningType"] = "ADVANCED"

        res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
            Config:    config,
            Method:    "POST",
            Project:   project,
            RawURL:    url,
            UserAgent: config.UserAgent,
            Body:      obj,
            Timeout:   defaultTimeout,
            Headers:   headers,
        })
        if err != nil {
            t.Errorf("Error creating StoragePool: %s", err)
            // Clean up the storage pool if creation fails because it already exists.
            cleanupTestingStoragePool(t, storagePoolName)
        }

        err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Creating StoragePool", config.UserAgent, defaultTimeout)
        if err != nil {
            t.Errorf("Error waiting to create StoragePool: %s", err)
        }
    }
}

func cleanupTestingStoragePool(t *testing.T, storagePoolName string) {
    config := acctest.GoogleProviderConfig(t)
    headers := make(http.Header)
    project := envvar.GetTestProjectFromEnv()
    zone := envvar.GetTestZoneFromEnv()
    url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools/%s", config.ComputeBasePath, project, zone, storagePoolName)
    defaultTimeout := 20 * time.Minute
    var obj map[string]interface{}

    res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
        Config:    config,
        Method:    "DELETE",
        Project:   project,
        RawURL:    url,
        UserAgent: config.UserAgent,
        Body:      obj,
        Timeout:   defaultTimeout,
        Headers:   headers,
    })
    if err != nil {
        t.Errorf("Error deleting StoragePool: %s", err)
    }

    err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Deleting StoragePool", config.UserAgent, defaultTimeout)
    if err != nil {
        t.Errorf("Error waiting to delete StoragePool: %s", err)
    }
}
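
The acceptance tests above exercise storage_pools through google_container_cluster; for completeness, a comparable sketch for the standalone node pool resource, assuming the shared node_config schema exposes the same attribute on google_container_node_pool (the cluster reference, pool name, and storage pool path are placeholders):

resource "google_container_node_pool" "example" {
  name       = "example-pool"
  cluster    = google_container_cluster.example.id
  node_count = 1

  node_config {
    machine_type = "c3-standard-4"
    disk_type    = "hyperdisk-balanced"
    storage_pools = [
      "projects/my-project/zones/us-central1-a/storagePools/my-storage-pool",
    ]
  }
}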
@@ -1453,13 +1453,25 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node

    if d.HasChange("node_config.0.disk_size_gb") ||
        d.HasChange("node_config.0.disk_type") ||
-       d.HasChange("node_config.0.machine_type") {
+       d.HasChange("node_config.0.machine_type") ||
+       d.HasChange("node_config.0.storage_pools") {
        req := &container.UpdateNodePoolRequest{
            Name:        name,
            DiskSizeGb:  int64(d.Get("node_config.0.disk_size_gb").(int)),
            DiskType:    d.Get("node_config.0.disk_type").(string),
            MachineType: d.Get("node_config.0.machine_type").(string),
        }
        if v, ok := d.GetOk("node_config.0.storage_pools"); ok {
            spList := v.([]interface{})
            storagePools := []string{}
            for _, v := range spList {
                if v != nil {
                    storagePools = append(storagePools, v.(string))
                }
            }
            req.StoragePools = storagePools
        }

        updateF := func() error {
            clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
            if config.UserProjectOverride {
@@ -1474,14 +1486,14 @@
            return ContainerOperationWait(config, op,
                nodePoolInfo.project,
                nodePoolInfo.location,
-               "updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent,
+               "updating GKE node pool disk_size_gb/disk_type/machine_type/storage_pools", userAgent,
                timeout)
        }

        if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
            return err
        }
-       log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", d.Id())
+       log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type/storage_pools for Node Pool %s", d.Id())
    }

    if d.HasChange(prefix + "node_config.0.taint") {
