From 817cc15ce1d5e18e5c44761f4cef1543d6a8ff57 Mon Sep 17 00:00:00 2001 From: Radoslaw Stankiewicz Date: Fri, 20 Sep 2019 13:09:31 +0200 Subject: [PATCH 1/3] expand, reduce bigtable --- .../resources/resource_bigtable_instance.go | 106 ++++++++++++++++-- 1 file changed, 95 insertions(+), 11 deletions(-) diff --git a/third_party/terraform/resources/resource_bigtable_instance.go b/third_party/terraform/resources/resource_bigtable_instance.go index ead906450e39..2ef6642fe138 100644 --- a/third_party/terraform/resources/resource_bigtable_instance.go +++ b/third_party/terraform/resources/resource_bigtable_instance.go @@ -4,12 +4,12 @@ import ( "context" "fmt" "log" + "reflect" + "cloud.google.com/go/bigtable" "github.com/hashicorp/terraform/helper/customdiff" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" - - "cloud.google.com/go/bigtable" ) func resourceBigtableInstance() *schema.Resource { @@ -22,6 +22,7 @@ func resourceBigtableInstance() *schema.Resource { CustomizeDiff: customdiff.All( resourceBigtableInstanceValidateDevelopment, resourceBigtableInstanceClusterReorderTypeList, + resourceResize, ), Schema: map[string]*schema.Schema{ @@ -40,12 +41,10 @@ func resourceBigtableInstance() *schema.Resource { "cluster_id": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "zone": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "num_nodes": { Type: schema.TypeInt, @@ -56,7 +55,6 @@ func resourceBigtableInstance() *schema.Resource { Type: schema.TypeString, Optional: true, Default: "SSD", - ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false), }, }, @@ -236,8 +234,12 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er clusterMap[cluster.Name] = cluster } + clusterTodoMap := make(map[string]interface{}, d.Get("cluster.#").(int)) + var storageTypeShort string for _, cluster := range d.Get("cluster").([]interface{}) { config := 
cluster.(map[string]interface{}) + storageTypeShort = config["storage_type"].(string) + clusterTodoMap[config["cluster_id"].(string)] = config cluster_id := config["cluster_id"].(string) if cluster, ok := clusterMap[cluster_id]; ok { if cluster.ServeNodes != config["num_nodes"].(int) { @@ -246,9 +248,46 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating cluster %s for instance %s", cluster.Name, d.Get("name").(string)) } } + } else { + // new cluster to be added + var newStorageType bigtable.StorageType + switch config["storage_type"].(string) { + case "SSD": + newStorageType = bigtable.SSD + case "HDD": + newStorageType = bigtable.HDD + } + conf := &bigtable.ClusterConfig{ + InstanceID: d.Get("name").(string), + ClusterID: cluster_id, + Zone: config["zone"].(string), + NumNodes: int32(config["num_nodes"].(int)), + StorageType: newStorageType, + } + c.CreateCluster(ctx, conf) } } + for name := range clusterMap { + if _, ok := clusterTodoMap[name]; !ok { + // cluster is running, but TF will have to remove it + err = c.DeleteCluster(ctx, d.Get("name").(string), name) + if err != nil { + return fmt.Errorf("Error deleting cluster %s for instance %s", name, d.Get("name").(string)) + } + } + } + // build current list of clusters after the change + clusterState := []map[string]interface{}{} + clustersOnline, err := c.Clusters(ctx, d.Get("name").(string)) + if err != nil { + return fmt.Errorf("Error retrieving clusters %q: %s", d.Get("name").(string), err.Error()) + } + for _, ci := range clustersOnline { + clusterState = append(clusterState, flattenBigtableCluster(ci, storageTypeShort)) + } + err = d.Set("cluster", clusterState) + return resourceBigtableInstanceRead(d, meta) } @@ -335,29 +374,43 @@ func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, m return fmt.Errorf("config is invalid: Too many cluster blocks: No more than 4 \"cluster\" blocks are allowed") } - if 
old_count.(int) != new_count.(int) { - return nil - } - var old_ids []string clusters := make(map[string]interface{}, new_count.(int)) - for i := 0; i < new_count.(int); i++ { + for i := 0; i < new_count.(int) || i < old_count.(int); i++ { old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) + _, c := diff.GetChange(fmt.Sprintf("cluster.%d", i)) if old_id != nil && old_id != "" { old_ids = append(old_ids, old_id.(string)) } - _, c := diff.GetChange(fmt.Sprintf("cluster.%d", i)) clusters[new_id.(string)] = c } // reorder clusters according to the old cluster order + matching_ids := make(map[string]interface{}, new_count.(int)) var old_cluster_order []interface{} + for _, id := range old_ids { if c, ok := clusters[id]; ok { + matching_ids[id] = c + delete(clusters, id) + } + } + + for _, id := range old_ids { + if c, ok := matching_ids[id]; ok { + old_cluster_order = append(old_cluster_order, c) + } else { + // todo empty clusters + keys := reflect.ValueOf(clusters).MapKeys() + c := clusters[keys[0].Interface().(string)] + delete(clusters, keys[0].Interface().(string)) old_cluster_order = append(old_cluster_order, c) } } + for _, value := range clusters { + old_cluster_order = append(old_cluster_order, value) + } err := diff.SetNew("cluster", old_cluster_order) if err != nil { @@ -366,3 +419,34 @@ func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, m return nil } + +func resourceResize(diff *schema.ResourceDiff, meta interface{}) error { + old_count, new_count := diff.GetChange("cluster.#") + if old_count.(int) != new_count.(int) { + log.Printf("[DEBUG] resize from %d to %d", old_count, new_count) + } + + for i := 0; i < old_count.(int) || i < new_count.(int); i++ { + cluster_old, cluster_new := diff.GetChange(fmt.Sprintf("cluster.%d", i)) + cluster_old_map := cluster_old.(map[string]interface{}) + cluster_new_map := cluster_new.(map[string]interface{}) + if cluster_old_map["cluster_id"] == nil { + log.Printf("[DEBUG] 
new [%s] will be added", cluster_new_map["cluster_id"].(string)) + } else if cluster_new_map["cluster_id"] == "" { + log.Printf("[DEBUG] old [%s] will be removed", cluster_old_map["cluster_id"].(string)) + } else { + log.Printf("[DEBUG] changes from %s to %s", cluster_old_map["cluster_id"].(string), cluster_new_map["cluster_id"].(string)) + if cluster_old_map["storage_type"].(string) != cluster_new_map["storage_type"].(string) { + diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i)) + } + if cluster_old_map["zone"].(string) != cluster_new_map["zone"].(string) { + diff.ForceNew(fmt.Sprintf("cluster.%d.zone", i)) + } + if cluster_old_map["cluster_id"].(string) != cluster_new_map["cluster_id"].(string) { + diff.ForceNew(fmt.Sprintf("cluster.%d.cluster_id", i)) + } + } + + } + return nil +} From cba6df79e82cb6c79c78c870a361ea2878958013 Mon Sep 17 00:00:00 2001 From: Radoslaw Stankiewicz Date: Fri, 20 Sep 2019 17:05:43 +0200 Subject: [PATCH 2/3] test for resize --- .../tests/resource_bigtable_instance_test.go | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/third_party/terraform/tests/resource_bigtable_instance_test.go b/third_party/terraform/tests/resource_bigtable_instance_test.go index 16426f7f6c7f..1bf766f24d6a 100644 --- a/third_party/terraform/tests/resource_bigtable_instance_test.go +++ b/third_party/terraform/tests/resource_bigtable_instance_test.go @@ -75,6 +75,41 @@ func TestAccBigtableInstance_cluster(t *testing.T) { }) } +func TestAccBigtableInstance_cluster_resize(t *testing.T) { + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigtableInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigtableInstance_one_in_the_cluster(instanceName, 3), + Check: resource.ComposeTestCheckFunc( + testAccBigtableInstanceExists( + 
"google_bigtable_instance.instance", 3), + ), + }, + { + Config: testAccBigtableInstance_cluster(instanceName, 3), + Check: resource.ComposeTestCheckFunc( + testAccBigtableInstanceExists( + "google_bigtable_instance.instance", 3), + ), + }, + { + Config: testAccBigtableInstance_two_in_the_cluster(instanceName, 3), + Check: resource.ComposeTestCheckFunc( + testAccBigtableInstanceExists( + "google_bigtable_instance.instance", 3), + ), + }, + }, + }) +} + func TestAccBigtableInstance_development(t *testing.T) { t.Parallel() @@ -193,6 +228,40 @@ resource "google_bigtable_instance" "instance" { `, instanceName) } +func testAccBigtableInstance_one_in_the_cluster(instanceName string, numNodes int) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + cluster { + cluster_id = "%s-a" + zone = "us-central1-a" + num_nodes = %d + storage_type = "HDD" + } +} +`, instanceName, instanceName, numNodes) +} + +func testAccBigtableInstance_two_in_the_cluster(instanceName string, numNodes int) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + cluster { + cluster_id = "%s-a" + zone = "us-central1-a" + num_nodes = %d + storage_type = "HDD" + } + cluster { + cluster_id = "%s-b" + zone = "us-central1-b" + num_nodes = %d + storage_type = "HDD" + } +} +`, instanceName, instanceName, numNodes, instanceName, numNodes) +} + func testAccBigtableInstance_cluster(instanceName string, numNodes int) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { From 5e0c339030807a5b3c9a64213f66d1a3aa897b21 Mon Sep 17 00:00:00 2001 From: Radoslaw Stankiewicz Date: Mon, 23 Sep 2019 14:50:50 +0200 Subject: [PATCH 3/3] fix plan --- .../resources/resource_bigtable_instance.go | 53 +++++++++++-------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/third_party/terraform/resources/resource_bigtable_instance.go b/third_party/terraform/resources/resource_bigtable_instance.go index 
2ef6642fe138..d05747120083 100644 --- a/third_party/terraform/resources/resource_bigtable_instance.go +++ b/third_party/terraform/resources/resource_bigtable_instance.go @@ -21,7 +21,7 @@ func resourceBigtableInstance() *schema.Resource { CustomizeDiff: customdiff.All( resourceBigtableInstanceValidateDevelopment, - resourceBigtableInstanceClusterReorderTypeList, + // resourceBigtableInstanceClusterReorderTypeList, resourceResize, ), @@ -235,21 +235,19 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er } clusterTodoMap := make(map[string]interface{}, d.Get("cluster.#").(int)) - var storageTypeShort string for _, cluster := range d.Get("cluster").([]interface{}) { config := cluster.(map[string]interface{}) - storageTypeShort = config["storage_type"].(string) clusterTodoMap[config["cluster_id"].(string)] = config cluster_id := config["cluster_id"].(string) if cluster, ok := clusterMap[cluster_id]; ok { if cluster.ServeNodes != config["num_nodes"].(int) { + log.Printf("[DEBUG] updating size - %s", d.Get("name").(string)) err = c.UpdateCluster(ctx, d.Get("name").(string), cluster.Name, int32(config["num_nodes"].(int))) if err != nil { return fmt.Errorf("Error updating cluster %s for instance %s", cluster.Name, d.Get("name").(string)) } } } else { - // new cluster to be added var newStorageType bigtable.StorageType switch config["storage_type"].(string) { case "SSD": @@ -264,6 +262,7 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er NumNodes: int32(config["num_nodes"].(int)), StorageType: newStorageType, } + log.Printf("[DEBUG] creating new - %s", d.Get("name").(string)) c.CreateCluster(ctx, conf) } } @@ -271,22 +270,13 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er for name := range clusterMap { if _, ok := clusterTodoMap[name]; !ok { // cluster is running, but TF will have to remove it + log.Printf("[DEBUG] removal of - %s", d.Get("name").(string)) err = 
c.DeleteCluster(ctx, d.Get("name").(string), name) if err != nil { return fmt.Errorf("Error deleting cluster %s for instance %s", name, d.Get("name").(string)) } } } - // build current list of clusters after the change - clusterState := []map[string]interface{}{} - clustersOnline, err := c.Clusters(ctx, d.Get("name").(string)) - if err != nil { - return fmt.Errorf("Error retrieving clusters %q: %s", d.Get("name").(string), err.Error()) - } - for _, ci := range clustersOnline { - clusterState = append(clusterState, flattenBigtableCluster(ci, storageTypeShort)) - } - err = d.Set("cluster", clusterState) return resourceBigtableInstanceRead(d, meta) } @@ -426,16 +416,28 @@ func resourceResize(diff *schema.ResourceDiff, meta interface{}) error { log.Printf("[DEBUG] resize from %d to %d", old_count, new_count) } + new_clusters := make(map[string]interface{}, new_count.(int)) + old_clusters := make(map[string]interface{}, old_count.(int)) + + for i := 0; i < new_count.(int) || i < old_count.(int); i++ { + old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) + o, n := diff.GetChange(fmt.Sprintf("cluster.%d", i)) + if old_id != nil && old_id != "" { + old_clusters[old_id.(string)] = o + } + if new_id != nil && new_id != "" { + new_clusters[new_id.(string)] = n + } + } + for i := 0; i < old_count.(int) || i < new_count.(int); i++ { - cluster_old, cluster_new := diff.GetChange(fmt.Sprintf("cluster.%d", i)) - cluster_old_map := cluster_old.(map[string]interface{}) + _, cluster_new := diff.GetChange(fmt.Sprintf("cluster.%d", i)) + // cluster_old_map := cluster_old.(map[string]interface{}) cluster_new_map := cluster_new.(map[string]interface{}) - if cluster_old_map["cluster_id"] == nil { - log.Printf("[DEBUG] new [%s] will be added", cluster_new_map["cluster_id"].(string)) - } else if cluster_new_map["cluster_id"] == "" { - log.Printf("[DEBUG] old [%s] will be removed", cluster_old_map["cluster_id"].(string)) - } else { - log.Printf("[DEBUG] changes from %s 
to %s", cluster_old_map["cluster_id"].(string), cluster_new_map["cluster_id"].(string)) + + if c, ok := old_clusters[cluster_new_map["cluster_id"].(string)]; ok { + log.Printf("[DEBUG] changes for %s", cluster_new_map["cluster_id"].(string)) + cluster_old_map := c.(map[string]interface{}) if cluster_old_map["storage_type"].(string) != cluster_new_map["storage_type"].(string) { diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i)) } @@ -445,6 +447,13 @@ func resourceResize(diff *schema.ResourceDiff, meta interface{}) error { if cluster_old_map["cluster_id"].(string) != cluster_new_map["cluster_id"].(string) { diff.ForceNew(fmt.Sprintf("cluster.%d.cluster_id", i)) } + } else { + if cluster_new_map["cluster_id"] == "" { + log.Printf("[DEBUG] removal - %s", cluster_new_map["cluster_id"].(string)) + } else { + log.Printf("[DEBUG] new cluster - %s", cluster_new_map["cluster_id"].(string)) + } + } }