diff --git a/third_party/terraform/resources/resource_bigtable_instance.go b/third_party/terraform/resources/resource_bigtable_instance.go
index ead906450e39..d05747120083 100644
--- a/third_party/terraform/resources/resource_bigtable_instance.go
+++ b/third_party/terraform/resources/resource_bigtable_instance.go
@@ -4,12 +4,12 @@ import (
 	"context"
 	"fmt"
 	"log"
+	"reflect"
 
+	"cloud.google.com/go/bigtable"
 	"github.com/hashicorp/terraform/helper/customdiff"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/helper/validation"
-
-	"cloud.google.com/go/bigtable"
 )
 
 func resourceBigtableInstance() *schema.Resource {
@@ -21,7 +21,8 @@ func resourceBigtableInstance() *schema.Resource {
 
 		CustomizeDiff: customdiff.All(
 			resourceBigtableInstanceValidateDevelopment,
-			resourceBigtableInstanceClusterReorderTypeList,
+			// resourceBigtableInstanceClusterReorderTypeList,
+			resourceResize,
 		),
 
 		Schema: map[string]*schema.Schema{
@@ -40,12 +41,10 @@
 						"cluster_id": {
 							Type:     schema.TypeString,
 							Required: true,
-							ForceNew: true,
 						},
 						"zone": {
 							Type:     schema.TypeString,
 							Required: true,
-							ForceNew: true,
 						},
 						"num_nodes": {
 							Type:     schema.TypeInt,
@@ -56,7 +55,6 @@
 							Type:     schema.TypeString,
 							Optional: true,
 							Default:  "SSD",
-							ForceNew: true,
 							ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
 						},
 					},
@@ -236,16 +234,47 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er
 		clusterMap[cluster.Name] = cluster
 	}
 
+	clusterTodoMap := make(map[string]interface{}, d.Get("cluster.#").(int))
 	for _, cluster := range d.Get("cluster").([]interface{}) {
 		config := cluster.(map[string]interface{})
+		clusterTodoMap[config["cluster_id"].(string)] = config
 		cluster_id := config["cluster_id"].(string)
 		if cluster, ok := clusterMap[cluster_id]; ok {
 			if cluster.ServeNodes != config["num_nodes"].(int) {
+				log.Printf("[DEBUG] updating size - %s", d.Get("name").(string))
 				err = c.UpdateCluster(ctx, d.Get("name").(string), cluster.Name, int32(config["num_nodes"].(int)))
 				if err != nil {
 					return fmt.Errorf("Error updating cluster %s for instance %s", cluster.Name, d.Get("name").(string))
 				}
 			}
+		} else {
+			var newStorageType bigtable.StorageType
+			switch config["storage_type"].(string) {
+			case "SSD":
+				newStorageType = bigtable.SSD
+			case "HDD":
+				newStorageType = bigtable.HDD
+			}
+			conf := &bigtable.ClusterConfig{
+				InstanceID:  d.Get("name").(string),
+				ClusterID:   cluster_id,
+				Zone:        config["zone"].(string),
+				NumNodes:    int32(config["num_nodes"].(int)),
+				StorageType: newStorageType,
+			}
+			log.Printf("[DEBUG] creating new cluster %s in instance %s", cluster_id, d.Get("name").(string))
+			// Don't drop the error from CreateCluster; a failed creation must fail the update.
+			err = c.CreateCluster(ctx, conf)
+			if err != nil {
+				return fmt.Errorf("Error creating cluster %s for instance %s", cluster_id, d.Get("name").(string))
+			}
+		}
+	}
+
+	for name := range clusterMap {
+		if _, ok := clusterTodoMap[name]; !ok {
+			// The cluster is still running, but it is gone from the config, so Terraform has to remove it.
+			log.Printf("[DEBUG] removing cluster %s from instance %s", name, d.Get("name").(string))
+			err = c.DeleteCluster(ctx, d.Get("name").(string), name)
+			if err != nil {
+				return fmt.Errorf("Error deleting cluster %s for instance %s", name, d.Get("name").(string))
+			}
 		}
 	}
 
@@ -335,29 +364,43 @@ func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, m
 		return fmt.Errorf("config is invalid: Too many cluster blocks: No more than 4 \"cluster\" blocks are allowed")
 	}
 
-	if old_count.(int) != new_count.(int) {
-		return nil
-	}
-
 	var old_ids []string
 	clusters := make(map[string]interface{}, new_count.(int))
 
-	for i := 0; i < new_count.(int); i++ {
+	for i := 0; i < new_count.(int) || i < old_count.(int); i++ {
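+		// Iterate up to the larger of the old and new counts so clusters removed from the config are still visited.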
 		old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
+		_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
 		if old_id != nil && old_id != "" {
 			old_ids = append(old_ids, old_id.(string))
 		}
-		_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
 		clusters[new_id.(string)] = c
 	}
 
 	// reorder clusters according to the old cluster order
+	matching_ids := make(map[string]interface{}, new_count.(int))
 	var old_cluster_order []interface{}
+
 	for _, id := range old_ids {
 		if c, ok := clusters[id]; ok {
+			matching_ids[id] = c
+			delete(clusters, id)
+		}
+	}
+
+	for _, id := range old_ids {
+		if c, ok := matching_ids[id]; ok {
+			old_cluster_order = append(old_cluster_order, c)
+		} else {
+			// TODO: handle empty clusters properly; for now take an arbitrary leftover cluster.
+			keys := reflect.ValueOf(clusters).MapKeys()
+			if len(keys) == 0 {
+				continue
+			}
+			c := clusters[keys[0].Interface().(string)]
+			delete(clusters, keys[0].Interface().(string))
 			old_cluster_order = append(old_cluster_order, c)
 		}
 	}
+	for _, value := range clusters {
+		old_cluster_order = append(old_cluster_order, value)
+	}
 
 	err := diff.SetNew("cluster", old_cluster_order)
 	if err != nil {
@@ -366,3 +409,53 @@ func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, m
 
 	return nil
 }
+
+func resourceResize(diff *schema.ResourceDiff, meta interface{}) error {
+	old_count, new_count := diff.GetChange("cluster.#")
+	if old_count.(int) != new_count.(int) {
+		log.Printf("[DEBUG] resize from %d to %d", old_count, new_count)
+	}
+
+	new_clusters := make(map[string]interface{}, new_count.(int))
+	old_clusters := make(map[string]interface{}, old_count.(int))
+
+	for i := 0; i < new_count.(int) || i < old_count.(int); i++ {
+		old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
+		o, n := diff.GetChange(fmt.Sprintf("cluster.%d", i))
+		if old_id != nil && old_id != "" {
+			old_clusters[old_id.(string)] = o
+		}
+		if new_id != nil && new_id != "" {
+			// Key the new map by new_id; keying it by old_id would mix up renamed clusters.
+			new_clusters[new_id.(string)] = n
+		}
+	}
+
+	for i := 0; i < old_count.(int) || i < new_count.(int); i++ {
+		_, cluster_new := diff.GetChange(fmt.Sprintf("cluster.%d", i))
+		cluster_new_map := cluster_new.(map[string]interface{})
+
+		if c, ok := old_clusters[cluster_new_map["cluster_id"].(string)]; ok {
+			log.Printf("[DEBUG] changes for %s", cluster_new_map["cluster_id"].(string))
+			cluster_old_map := c.(map[string]interface{})
+			if cluster_old_map["storage_type"].(string) != cluster_new_map["storage_type"].(string) {
+				if err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i)); err != nil {
+					return err
+				}
+			}
+			if cluster_old_map["zone"].(string) != cluster_new_map["zone"].(string) {
+				if err := diff.ForceNew(fmt.Sprintf("cluster.%d.zone", i)); err != nil {
+					return err
+				}
+			}
+			if cluster_old_map["cluster_id"].(string) != cluster_new_map["cluster_id"].(string) {
+				if err := diff.ForceNew(fmt.Sprintf("cluster.%d.cluster_id", i)); err != nil {
+					return err
+				}
+			}
+		} else {
+			if cluster_new_map["cluster_id"] == "" {
+				log.Printf("[DEBUG] cluster at index %d was removed", i)
+			} else {
+				log.Printf("[DEBUG] new cluster - %s", cluster_new_map["cluster_id"].(string))
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/third_party/terraform/tests/resource_bigtable_instance_test.go b/third_party/terraform/tests/resource_bigtable_instance_test.go
index 16426f7f6c7f..1bf766f24d6a 100644
--- a/third_party/terraform/tests/resource_bigtable_instance_test.go
+++ b/third_party/terraform/tests/resource_bigtable_instance_test.go
@@ -75,6 +75,41 @@ func TestAccBigtableInstance_cluster(t *testing.T) {
 	})
 }
 
+func TestAccBigtableInstance_cluster_resize(t *testing.T) {
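+	// Applies three cluster configurations in sequence to exercise adding and removing clusters without recreating the instance.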
+	t.Parallel()
+
+	instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckBigtableInstanceDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccBigtableInstance_one_in_the_cluster(instanceName, 3),
+				Check: resource.ComposeTestCheckFunc(
+					testAccBigtableInstanceExists(
+						"google_bigtable_instance.instance", 3),
+				),
+			},
+			{
+				Config: testAccBigtableInstance_cluster(instanceName, 3),
+				Check: resource.ComposeTestCheckFunc(
+					testAccBigtableInstanceExists(
+						"google_bigtable_instance.instance", 3),
+				),
+			},
+			{
+				Config: testAccBigtableInstance_two_in_the_cluster(instanceName, 3),
+				Check: resource.ComposeTestCheckFunc(
+					testAccBigtableInstanceExists(
+						"google_bigtable_instance.instance", 3),
+				),
+			},
+		},
+	})
+}
+
 func TestAccBigtableInstance_development(t *testing.T) {
 	t.Parallel()
 
@@ -193,6 +228,40 @@ resource "google_bigtable_instance" "instance" {
 `, instanceName)
 }
 
+func testAccBigtableInstance_one_in_the_cluster(instanceName string, numNodes int) string {
+	return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s-a"
+    zone         = "us-central1-a"
+    num_nodes    = %d
+    storage_type = "HDD"
+  }
+}
+`, instanceName, instanceName, numNodes)
+}
+
+func testAccBigtableInstance_two_in_the_cluster(instanceName string, numNodes int) string {
+	return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s-a"
+    zone         = "us-central1-a"
+    num_nodes    = %d
+    storage_type = "HDD"
+  }
+  cluster {
+    cluster_id   = "%s-b"
+    zone         = "us-central1-b"
+    num_nodes    = %d
+    storage_type = "HDD"
+  }
+}
+`, instanceName, instanceName, numNodes, instanceName, numNodes)
+}
+
 func testAccBigtableInstance_cluster(instanceName string, numNodes int) string {
 	return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {