Add update support for bigtable clusters (such as resizing) #2350
Changes to the Bigtable instance resource:
@@ -4,12 +4,12 @@ import (
 	"context"
 	"fmt"
 	"log"
+	"reflect"

-	"cloud.google.com/go/bigtable"
 	"github.com/hashicorp/terraform/helper/customdiff"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/helper/validation"
+
+	"cloud.google.com/go/bigtable"
 )

 func resourceBigtableInstance() *schema.Resource {
@@ -21,7 +21,8 @@ func resourceBigtableInstance() *schema.Resource {

 		CustomizeDiff: customdiff.All(
 			resourceBigtableInstanceValidateDevelopment,
-			resourceBigtableInstanceClusterReorderTypeList,
+			// resourceBigtableInstanceClusterReorderTypeList,
+			resourceResize,
 		),

 		Schema: map[string]*schema.Schema{
@@ -40,12 +41,10 @@ func resourceBigtableInstance() *schema.Resource {
 			"cluster_id": {
 				Type:     schema.TypeString,
 				Required: true,
-				ForceNew: true,
 			},
 			"zone": {
 				Type:     schema.TypeString,
 				Required: true,
-				ForceNew: true,
 			},
 			"num_nodes": {
 				Type: schema.TypeInt,
@@ -56,7 +55,6 @@ func resourceBigtableInstance() *schema.Resource {
 				Type:         schema.TypeString,
 				Optional:     true,
 				Default:      "SSD",
-				ForceNew:     true,
 				ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
 			},
 		},
@@ -236,16 +234,47 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
 		clusterMap[cluster.Name] = cluster
 	}

+	clusterTodoMap := make(map[string]interface{}, d.Get("cluster.#").(int))
 	for _, cluster := range d.Get("cluster").([]interface{}) {
 		config := cluster.(map[string]interface{})
+		clusterTodoMap[config["cluster_id"].(string)] = config
 		cluster_id := config["cluster_id"].(string)
 		if cluster, ok := clusterMap[cluster_id]; ok {
 			if cluster.ServeNodes != config["num_nodes"].(int) {
+				log.Printf("[DEBUG] updating size - %s", d.Get("name").(string))
 				err = c.UpdateCluster(ctx, d.Get("name").(string), cluster.Name, int32(config["num_nodes"].(int)))
 				if err != nil {
 					return fmt.Errorf("Error updating cluster %s for instance %s", cluster.Name, d.Get("name").(string))
 				}
 			}
-		}
+		} else {
+			var newStorageType bigtable.StorageType
+			switch config["storage_type"].(string) {
+			case "SSD":
+				newStorageType = bigtable.SSD
+			case "HDD":
+				newStorageType = bigtable.HDD
+			}
+			conf := &bigtable.ClusterConfig{
+				InstanceID:  d.Get("name").(string),
+				ClusterID:   cluster_id,
+				Zone:        config["zone"].(string),
+				NumNodes:    int32(config["num_nodes"].(int)),
+				StorageType: newStorageType,
+			}
+			log.Printf("[DEBUG] creating new - %s", d.Get("name").(string))
+			c.CreateCluster(ctx, conf)
+		}
 	}
+
+	for name := range clusterMap {
+		if _, ok := clusterTodoMap[name]; !ok {
+			// cluster is running, but TF will have to remove it
+			log.Printf("[DEBUG] removal of - %s", d.Get("name").(string))
+			err = c.DeleteCluster(ctx, d.Get("name").(string), name)
+			if err != nil {
+				return fmt.Errorf("Error deleting cluster %s for instance %s", name, d.Get("name").(string))
+			}
+		}
+	}
@@ -335,29 +364,43 @@ func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, meta interface{}) error {
 		return fmt.Errorf("config is invalid: Too many cluster blocks: No more than 4 \"cluster\" blocks are allowed")
 	}

 	if old_count.(int) != new_count.(int) {

Review comment on the line above: The validation above here is worth preserving. I'd expect the new CustomizeDiff can supersede the rest, though.

 		return nil
 	}

 	var old_ids []string
 	clusters := make(map[string]interface{}, new_count.(int))

-	for i := 0; i < new_count.(int); i++ {
+	for i := 0; i < new_count.(int) || i < old_count.(int); i++ {
 		old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
-		_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
 		if old_id != nil && old_id != "" {
 			old_ids = append(old_ids, old_id.(string))
 		}
+		_, c := diff.GetChange(fmt.Sprintf("cluster.%d", i))
 		clusters[new_id.(string)] = c
 	}

 	// reorder clusters according to the old cluster order
+	matching_ids := make(map[string]interface{}, new_count.(int))
 	var old_cluster_order []interface{}

 	for _, id := range old_ids {
 		if c, ok := clusters[id]; ok {
-			old_cluster_order = append(old_cluster_order, c)
+			matching_ids[id] = c
+			delete(clusters, id)
 		}
 	}

+	for _, id := range old_ids {
+		if c, ok := matching_ids[id]; ok {
+			old_cluster_order = append(old_cluster_order, c)
+		} else {
+			// todo empty clusters
+			keys := reflect.ValueOf(clusters).MapKeys()
+			c := clusters[keys[0].Interface().(string)]
+			delete(clusters, keys[0].Interface().(string))
+			old_cluster_order = append(old_cluster_order, c)
+		}
+	}
+	for _, value := range clusters {
+		old_cluster_order = append(old_cluster_order, value)
+	}

 	err := diff.SetNew("cluster", old_cluster_order)

Review comment on the line above: this is producing "Provider produced inconsistent final plan" after the change.

 	if err != nil {
@@ -366,3 +409,53 @@ func resourceBigtableInstanceClusterReorderTypeList(diff *schema.ResourceDiff, meta interface{}) error {

 	return nil
 }
+
+func resourceResize(diff *schema.ResourceDiff, meta interface{}) error {
+	old_count, new_count := diff.GetChange("cluster.#")
+	if old_count.(int) != new_count.(int) {
+		log.Printf("[DEBUG] resize from %d to %d", old_count, new_count)
+	}
+
+	new_clusters := make(map[string]interface{}, new_count.(int))
+	old_clusters := make(map[string]interface{}, old_count.(int))
+
+	for i := 0; i < new_count.(int) || i < old_count.(int); i++ {

Review comment on the loop above: Do you mind commenting the blocks like this with what they're doing? While it's possible to figure out, customizeDiffs are pretty finicky and it's a big help to be able to read quickly what they're doing.

+		old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
+		o, n := diff.GetChange(fmt.Sprintf("cluster.%d", i))
+		if old_id != nil && old_id != "" {
+			old_clusters[old_id.(string)] = o
+		}
+		if new_id != nil && new_id != "" {
+			new_clusters[new_id.(string)] = n
+		}
+	}
+
+	for i := 0; i < old_count.(int) || i < new_count.(int); i++ {
+		_, cluster_new := diff.GetChange(fmt.Sprintf("cluster.%d", i))
+		// cluster_old_map := cluster_old.(map[string]interface{})
+		cluster_new_map := cluster_new.(map[string]interface{})
+
+		if c, ok := old_clusters[cluster_new_map["cluster_id"].(string)]; ok {
+			log.Printf("[DEBUG] changes for %s", cluster_new_map["cluster_id"].(string))
+			cluster_old_map := c.(map[string]interface{})
+			if cluster_old_map["storage_type"].(string) != cluster_new_map["storage_type"].(string) {
+				diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i))
+			}
+			if cluster_old_map["zone"].(string) != cluster_new_map["zone"].(string) {
+				diff.ForceNew(fmt.Sprintf("cluster.%d.zone", i))
+			}
+			if cluster_old_map["cluster_id"].(string) != cluster_new_map["cluster_id"].(string) {
+				diff.ForceNew(fmt.Sprintf("cluster.%d.cluster_id", i))
+			}
+		} else {
+			if cluster_new_map["cluster_id"] == "" {
+				log.Printf("[DEBUG] removal - %s", cluster_new_map["cluster_id"].(string))
+			} else {
+				log.Printf("[DEBUG] new cluster - %s", cluster_new_map["cluster_id"].(string))
+			}
+		}
+	}
+	return nil
+}
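In the spirit of the review comment above, here is one way the two blocks in resourceResize could be annotated. The comments are an editor's reading of what the code appears to do, not wording from the PR (note the indexing loop here uses new_id as the key for new_clusters, which looks like the intent; the diff as posted keys it by old_id):

// Index the cluster blocks from both sides of the diff by cluster_id, so
// the checks below do not depend on block order. An id that appears only
// in old_clusters was removed from config; one that appears only in
// new_clusters was added.
for i := 0; i < new_count.(int) || i < old_count.(int); i++ {
	old_id, new_id := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i))
	o, n := diff.GetChange(fmt.Sprintf("cluster.%d", i))
	if old_id != nil && old_id != "" {
		old_clusters[old_id.(string)] = o
	}
	if new_id != nil && new_id != "" {
		new_clusters[new_id.(string)] = n
	}
}

// For ids present on both sides, force recreation only when a field that
// cannot be changed in place (storage_type, zone, cluster_id) differs;
// num_nodes differences are left to be applied as in-place updates.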
Changes to the Bigtable instance acceptance tests:
@@ -75,6 +75,41 @@ func TestAccBigtableInstance_cluster(t *testing.T) {
 	})
 }

+func TestAccBigtableInstance_cluster_resize(t *testing.T) {
+	t.Parallel()
+
+	instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckBigtableInstanceDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccBigtableInstance_one_in_the_cluster(instanceName, 3),
+				Check: resource.ComposeTestCheckFunc(

Review comment here: After rebasing, you'll probably need to change to use "import-style" checks. See https://github.com/GoogleCloudPlatform/magic-modules/pull/2409/files#diff-1296b3b7cf174ed6b01d0139e46bdd24L69-R76. If you're seeing differences on…

+					testAccBigtableInstanceExists(
+						"google_bigtable_instance.instance", 3),
+				),
+			},
+			{
+				Config: testAccBigtableInstance_cluster(instanceName, 3),
+				Check: resource.ComposeTestCheckFunc(
+					testAccBigtableInstanceExists(
+						"google_bigtable_instance.instance", 3),
+				),
+			},
+			{
+				Config: testAccBigtableInstance_two_in_the_cluster(instanceName, 3),
+				Check: resource.ComposeTestCheckFunc(
+					testAccBigtableInstanceExists(
+						"google_bigtable_instance.instance", 3),
+				),
+			},
+		},
+	})
+}
+
 func TestAccBigtableInstance_development(t *testing.T) {
 	t.Parallel()
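For reference, an "import-style" step of the kind the reviewer links to replaces the Exists/attribute checks with an import verification. The exact shape below is an assumption based on common provider test patterns, not code from this PR:

{
	Config: testAccBigtableInstance_one_in_the_cluster(instanceName, 3),
},
{
	ResourceName:      "google_bigtable_instance.instance",
	ImportState:       true,
	ImportStateVerify: true,
},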
@@ -193,6 +228,40 @@ resource "google_bigtable_instance" "instance" {
 `, instanceName)
 }

+func testAccBigtableInstance_one_in_the_cluster(instanceName string, numNodes int) string {
+	return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s-a"
+    zone         = "us-central1-a"
+    num_nodes    = %d
+    storage_type = "HDD"
+  }
+}
+`, instanceName, instanceName, numNodes)
+}
+
+func testAccBigtableInstance_two_in_the_cluster(instanceName string, numNodes int) string {
+	return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s-a"
+    zone         = "us-central1-a"
+    num_nodes    = %d
+    storage_type = "HDD"
+  }
+  cluster {
+    cluster_id   = "%s-b"
+    zone         = "us-central1-b"
+    num_nodes    = %d
+    storage_type = "HDD"
+  }
+}
+`, instanceName, instanceName, numNodes, instanceName, numNodes)
+}
+
 func testAccBigtableInstance_cluster(instanceName string, numNodes int) string {
 	return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
Review comment on the update logic: I'd expect all this updating logic will go away, and get replaced by expanders very similar (if not identical) to Create; https://github.com/googleapis/google-cloud-go/blob/master/bigtable/admin.go#L1290 presents the same interface as it.

If they end up identical, feel free to refactor into a shared "expandBigtableInstance" method or just copy/paste. Either is fine.
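A shared expander along those lines might look roughly like the sketch below. The name expandBigtableClusters is hypothetical (nothing in this PR defines it); the bigtable.ClusterConfig fields match the ones the update code above already populates:

// expandBigtableClusters converts the "cluster" blocks from the resource
// data into ClusterConfig values that both Create and Update could share.
func expandBigtableClusters(clusters []interface{}, instanceID string) []bigtable.ClusterConfig {
	results := make([]bigtable.ClusterConfig, 0, len(clusters))
	for _, c := range clusters {
		cluster := c.(map[string]interface{})
		var storageType bigtable.StorageType
		switch cluster["storage_type"].(string) {
		case "SSD":
			storageType = bigtable.SSD
		case "HDD":
			storageType = bigtable.HDD
		}
		results = append(results, bigtable.ClusterConfig{
			InstanceID:  instanceID,
			ClusterID:   cluster["cluster_id"].(string),
			Zone:        cluster["zone"].(string),
			NumNodes:    int32(cluster["num_nodes"].(int)),
			StorageType: storageType,
		})
	}
	return results
}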