Add Bigtable autoscaling configs to Instance #4150

Merged
3 changes: 3 additions & 0 deletions .changelog/5803.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
bigtable: added support for `autoscaling_config` to `google_bigtable_instance`
```
2 changes: 1 addition & 1 deletion go.mod
@@ -1,6 +1,6 @@
module github.com/hashicorp/terraform-provider-google-beta
require (
cloud.google.com/go/bigtable v1.10.1
cloud.google.com/go/bigtable v1.13.0
cloud.google.com/go/iam v0.1.1 // indirect
github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20220316014534-ac1c1c4fdb29
github.com/apparentlymart/go-cidr v1.1.0
2 changes: 2 additions & 0 deletions go.sum
@@ -45,6 +45,8 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g=
cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8=
cloud.google.com/go/bigtable v1.13.0 h1:ay8BM3Am2aIH95swAO/hjdE1UwzxVzLxRJ5cEdXN/LY=
cloud.google.com/go/bigtable v1.13.0/go.mod h1:26n+Af4kb+O8sUWehsIbsEMLb/X0cK2tVgAasJwSj20=
cloud.google.com/go/compute v0.1.0 h1:rSUBvAyVwNJ5uQCKNJFMwPtTvJkfN38b6Pvb9zZoqJ8=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0 h1:mPL/MzDDYHsh5tHRS9mhmhWlcgClCrCa6ApQCU6wnHI=
51 changes: 48 additions & 3 deletions google-beta/resource_bigtable_instance.go
@@ -85,6 +85,31 @@ func resourceBigtableInstance() *schema.Resource {
Computed: true,
Description: `Describes the Cloud KMS encryption key that will be used to protect the destination Bigtable cluster. The requirements for this key are: 1) The Cloud Bigtable service account associated with the project that contains this cluster must be granted the cloudkms.cryptoKeyEncrypterDecrypter role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster. 3) All clusters within an instance must use the same CMEK key. Values are of the form projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`,
},
"autoscaling_config": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Description: "A list of Autoscaling configurations. Only one element is used and allowed.",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_nodes": {
Type: schema.TypeInt,
Required: true,
Description: `The minimum number of nodes for autoscaling.`,
},
"max_nodes": {
Type: schema.TypeInt,
Required: true,
Description: `The maximum number of nodes for autoscaling.`,
},
"cpu_target": {
Type: schema.TypeInt,
Required: true,
Description: `The target CPU utilization for autoscaling. Value must be between 10 and 80.`,
},
},
},
},
},
},
},
@@ -352,13 +377,22 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} {
storageType = "HDD"
}

return map[string]interface{}{
cluster := map[string]interface{}{
"zone": c.Zone,
"num_nodes": c.ServeNodes,
"cluster_id": c.Name,
"storage_type": storageType,
"kms_key_name": c.KMSKeyName,
}
if c.AutoscalingConfig != nil {
cluster["autoscaling_config"] = make([]map[string]interface{}, 1)
autoscaling_config := cluster["autoscaling_config"].([]map[string]interface{})
autoscaling_config[0] = make(map[string]interface{})
autoscaling_config[0]["min_nodes"] = c.AutoscalingConfig.MinNodes
autoscaling_config[0]["max_nodes"] = c.AutoscalingConfig.MaxNodes
autoscaling_config[0]["cpu_target"] = c.AutoscalingConfig.CPUTargetPercent
}
return cluster
}

func expandBigtableClusters(clusters []interface{}, instanceID string, config *Config) ([]bigtable.ClusterConfig, error) {
@@ -376,14 +410,25 @@ func expandBigtableClusters(clusters []interface{}, instanceID string, config *C
case "HDD":
storageType = bigtable.HDD
}
results = append(results, bigtable.ClusterConfig{

cluster_config := bigtable.ClusterConfig{
InstanceID: instanceID,
Zone: zone,
ClusterID: cluster["cluster_id"].(string),
NumNodes: int32(cluster["num_nodes"].(int)),
StorageType: storageType,
KMSKeyName: cluster["kms_key_name"].(string),
})
}
autoscaling_configs := cluster["autoscaling_config"].([]interface{})
if len(autoscaling_configs) > 0 {
autoscaling_config := autoscaling_configs[0].(map[string]interface{})
cluster_config.AutoscalingConfig = &bigtable.AutoscalingConfig{
MinNodes: autoscaling_config["min_nodes"].(int),
MaxNodes: autoscaling_config["max_nodes"].(int),
CPUTargetPercent: autoscaling_config["cpu_target"].(int),
}
}
results = append(results, cluster_config)
}
return results, nil
}
157 changes: 157 additions & 0 deletions google-beta/resource_bigtable_instance_test.go
@@ -185,6 +185,131 @@ func TestAccBigtableInstance_kms(t *testing.T) {
})
}

func TestAccBigtableInstance_createWithAutoscalingAndUpdate(t *testing.T) {
// Bigtable instances do not use the shared HTTP client; this test creates a real instance
skipIfVcr(t)
t.Parallel()

instanceName := fmt.Sprintf("tf-test-%s", randString(t, 10))

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t),
Steps: []resource.TestStep{
{
// Create Autoscaling config with 2 nodes.
Config: testAccBigtableInstance_autoscalingCluster(instanceName, 2, 5, 70),
Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("google_bigtable_instance.instance",
"cluster.0.num_nodes", "2")),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
{
// Update Autoscaling configs.
Config: testAccBigtableInstance_autoscalingCluster(instanceName, 1, 5, 80),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
},
})
}

func TestAccBigtableInstance_enableAndDisableAutoscaling(t *testing.T) {
// Bigtable instances do not use the shared HTTP client; this test creates a real instance
skipIfVcr(t)
t.Parallel()

instanceName := fmt.Sprintf("tf-test-%s", randString(t, 10))

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccBigtableInstance(instanceName, 2),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
{
// Enable Autoscaling.
Config: testAccBigtableInstance_autoscalingCluster(instanceName, 2, 5, 70),
Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("google_bigtable_instance.instance",
"cluster.0.num_nodes", "2")),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
{
// Disable autoscaling by specifying num_nodes=1; the node count becomes 1.
Config: testAccBigtableInstance(instanceName, 1),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
},
})
}

func TestAccBigtableInstance_enableAndDisableAutoscalingWithoutNumNodes(t *testing.T) {
// Bigtable instances do not use the shared HTTP client; this test creates a real instance
skipIfVcr(t)
t.Parallel()

instanceName := fmt.Sprintf("tf-test-%s", randString(t, 10))

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t),
Steps: []resource.TestStep{
{
// Create Autoscaling cluster with 2 nodes.
Config: testAccBigtableInstance_autoscalingCluster(instanceName, 2, 5, 70),
Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("google_bigtable_instance.instance",
"cluster.0.num_nodes", "2")),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
{
// Disable autoscaling without specifying num_nodes; it should keep the current node count, which is 2.
Config: testAccBigtableInstance_noNumNodes(instanceName),
Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr("google_bigtable_instance.instance",
"cluster.0.num_nodes", "2")),
},
{
ResourceName: "google_bigtable_instance.instance",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, // we don't read instance type back
},
},
})
}

func testAccCheckBigtableInstanceDestroyProducer(t *testing.T) func(s *terraform.State) error {
return func(s *terraform.State) error {
var ctx = context.Background()
@@ -230,6 +355,21 @@ resource "google_bigtable_instance" "instance" {
`, instanceName, instanceName, numNodes)
}

func testAccBigtableInstance_noNumNodes(instanceName string) string {
return fmt.Sprintf(`
resource "google_bigtable_instance" "instance" {
name = "%s"
cluster {
cluster_id = "%s"
storage_type = "HDD"
}
deletion_protection = false
labels = {
env = "default"
}
}`, instanceName, instanceName)
}

func testAccBigtableInstance_invalid(instanceName string) string {
return fmt.Sprintf(`
resource "google_bigtable_instance" "instance" {
@@ -395,3 +535,20 @@ resource "google_bigtable_instance" "instance" {
}
`, pid, instanceName, instanceName, numNodes, kmsKey)
}

func testAccBigtableInstance_autoscalingCluster(instanceName string, min int, max int, cpuTarget int) string {
return fmt.Sprintf(`resource "google_bigtable_instance" "instance" {
name = "%s"
cluster {
cluster_id = "%s"
storage_type = "HDD"
autoscaling_config {
min_nodes = %d
max_nodes = %d
cpu_target = %d
}
}
deletion_protection = false

}`, instanceName, instanceName, min, max, cpuTarget)
}
8 changes: 8 additions & 0 deletions website/docs/r/bigtable_instance.html.markdown
@@ -87,6 +87,14 @@ Bigtable instances are noted on the [Cloud Bigtable locations page](https://clou
Required, with a minimum of `1` for a `PRODUCTION` instance. Must be left unset
for a `DEVELOPMENT` instance.

* `autoscaling_config` - (Optional) Autoscaling configuration for the cluster, containing the following arguments:

* `min_nodes` - (Required) The minimum number of nodes for autoscaling.
* `max_nodes` - (Required) The maximum number of nodes for autoscaling.
* `cpu_target` - (Required) The CPU utilization target in percentage. Must be between 10 and 80.

!> **Warning**: Only one of `autoscaling_config` or `num_nodes` should be set for a cluster. If both are set, `num_nodes` is ignored. If neither is set, autoscaling will be disabled and the cluster sized to its current node count. See the example below.
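
A minimal configuration using the new block might look like the following sketch; the instance name, cluster ID, and zone are placeholder values, not taken from this change:

```hcl
resource "google_bigtable_instance" "example" {
  name = "example-autoscaling-instance"

  cluster {
    cluster_id   = "example-cluster"
    zone         = "us-central1-b"
    storage_type = "SSD"

    # When autoscaling_config is set, omit num_nodes; the node count is managed automatically.
    autoscaling_config {
      min_nodes  = 1
      max_nodes  = 5
      cpu_target = 70
    }
  }

  deletion_protection = false
}
```

To return to manual scaling, remove the `autoscaling_config` block and either set `num_nodes` explicitly or leave it unset to keep the cluster at its current node count.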

* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or
`"HDD"`. Defaults to `"SSD"`.
