[terraform] add support for gke resource usage (#825)
Signed-off-by: Modular Magician <magic-modules@google.com>
modular-magician authored and danawillow committed Jun 21, 2019
1 parent 806bfad commit 4c5c537
Showing 3 changed files with 183 additions and 0 deletions.
98 changes: 98 additions & 0 deletions google-beta/resource_container_cluster.go
@@ -750,6 +750,34 @@ func resourceContainerCluster() *schema.Resource {
},
},

"resource_usage_export_config": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enable_network_egress_metering": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"bigquery_destination": {
Type: schema.TypeList,
MaxItems: 1,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dataset_id": {
Type: schema.TypeString,
Required: true,
},
},
},
},
},
},
},

"enable_intranode_visibility": {
Type: schema.TypeBool,
Optional: true,
@@ -1001,6 +1029,9 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
if v, ok := d.GetOk("workload_identity_config"); ok {
cluster.WorkloadIdentityConfig = expandWorkloadIdentityConfig(v)
}
if v, ok := d.GetOk("resource_usage_export_config"); ok {
cluster.ResourceUsageExportConfig = expandResourceUsageExportConfig(v)
}

req := &containerBeta.CreateClusterRequest{
Cluster: cluster,
@@ -1191,6 +1222,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
}

d.Set("resource_labels", cluster.ResourceLabels)

if err := d.Set("resource_usage_export_config", flattenResourceUsageExportConfig(cluster.ResourceUsageExportConfig)); err != nil {
return err
}
return nil
}

@@ -1794,6 +1829,30 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
}
}

if d.HasChange("resource_usage_export_config") {
c := d.Get("resource_usage_export_config")
req := &containerBeta.UpdateClusterRequest{
Update: &containerBeta.ClusterUpdate{
DesiredResourceUsageExportConfig: expandResourceUsageExportConfig(c),
},
}

updateF := func() error {
name := containerClusterFullName(project, location, clusterName)
op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
if err != nil {
return err
}
// Wait until it's updated
return containerOperationWait(config, op, project, location, "updating GKE cluster resource usage export config", timeoutInMinutes)
}
if err := lockedCall(lockKey, updateF); err != nil {
return err
}
log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id())

d.SetPartial("resource_usage_export_config")
}
d.Partial(false)

return resourceContainerClusterRead(d, meta)
@@ -2209,6 +2268,31 @@ func expandDefaultMaxPodsConstraint(v interface{}) *containerBeta.MaxPodsConstra
}
}

func expandResourceUsageExportConfig(configured interface{}) *containerBeta.ResourceUsageExportConfig {
l := configured.([]interface{})
if len(l) == 0 || l[0] == nil {
return &containerBeta.ResourceUsageExportConfig{}
}

resourceUsageConfig := l[0].(map[string]interface{})

result := &containerBeta.ResourceUsageExportConfig{
EnableNetworkEgressMetering: resourceUsageConfig["enable_network_egress_metering"].(bool),
ForceSendFields: []string{"EnableNetworkEgressMetering"},
}
if _, ok := resourceUsageConfig["bigquery_destination"]; ok {
if len(resourceUsageConfig["bigquery_destination"].([]interface{})) > 0 {
bigqueryDestination := resourceUsageConfig["bigquery_destination"].([]interface{})[0].(map[string]interface{})
if _, ok := bigqueryDestination["dataset_id"]; ok {
result.BigqueryDestination = &containerBeta.BigQueryDestination{
DatasetId: bigqueryDestination["dataset_id"].(string),
}
}
}
}
return result
}

func flattenNetworkPolicy(c *containerBeta.NetworkPolicy) []map[string]interface{} {
result := []map[string]interface{}{}
if c != nil {
@@ -2466,6 +2550,20 @@ func flattenPodSecurityPolicyConfig(c *containerBeta.PodSecurityPolicyConfig) []
}
}

func flattenResourceUsageExportConfig(c *containerBeta.ResourceUsageExportConfig) []map[string]interface{} {
if c == nil {
return nil
}
return []map[string]interface{}{
{
"enable_network_egress_metering": c.EnableNetworkEgressMetering,
"bigquery_destination": []map[string]interface{}{
{"dataset_id": c.BigqueryDestination.DatasetId},
},
},
}
}

func flattenDatabaseEncryption(c *containerBeta.DatabaseEncryption) []map[string]interface{} {
if c == nil {
return nil
63 changes: 63 additions & 0 deletions google-beta/resource_container_cluster_test.go
@@ -1875,6 +1875,40 @@ func TestAccContainerCluster_withDatabaseEncryption(t *testing.T) {
})
}

func TestAccContainerCluster_withResourceUsageExportConfig(t *testing.T) {
t.Parallel()

suffix := acctest.RandString(10)
clusterName := fmt.Sprintf("cluster-test-%s", suffix)
datasetId := fmt.Sprintf("tf_test_cluster_resource_usage_%s", suffix)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, true),
},
{
ResourceName: "google_container_cluster.with_resource_usage_export_config",
ImportStateIdPrefix: "us-central1-a/",
ImportState: true,
ImportStateVerify: true,
},
{
Config: testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, false),
},
{
ResourceName: "google_container_cluster.with_resource_usage_export_config",
ImportStateIdPrefix: "us-central1-a/",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func testAccCheckContainerClusterDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)

@@ -3220,6 +3254,35 @@ resource "google_container_cluster" "with_pod_security_policy" {
}`, clusterName, enabled)
}

func testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId string, resourceUsage bool) string {
resourceUsageConfig := ""
if resourceUsage {
resourceUsageConfig = `
resource_usage_export_config {
enable_network_egress_metering = true
bigquery_destination {
dataset_id = "${google_bigquery_dataset.default.dataset_id}"
}
}`
}

config := fmt.Sprintf(`
resource "google_bigquery_dataset" "default" {
dataset_id = "%s"
description = "gke resource usage dataset tests"
delete_contents_on_destroy = true
}
resource "google_container_cluster" "with_resource_usage_export_config" {
name = "cluster-test-%s"
zone = "us-central1-a"
initial_node_count = 1
%s
}`, datasetId, clusterName, resourceUsageConfig)
return config
}

func testAccContainerCluster_withPrivateClusterConfig(clusterName string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
22 changes: 22 additions & 0 deletions website/docs/r/container_cluster.html.markdown
@@ -292,6 +292,10 @@ to the datasource. A `region` can have a different set of supported versions tha

* `resource_labels` - (Optional) The GCE resource labels (a map of key/value pairs) to be applied to the cluster.

* `resource_usage_export_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html)) Configuration for the
[ResourceUsageExportConfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-usage-metering) feature.
Structure is documented below.

* `subnetwork` - (Optional) The name or self_link of the Google Compute Engine subnetwork in
which the cluster's instances are launched.

@@ -585,6 +589,24 @@ In addition, the `private_cluster_config` allows access to the following read-on

* `public_endpoint` - The external IP address of this cluster's master endpoint.

The `resource_usage_export_config` block supports:

* `enable_network_egress_metering` (Optional) - Whether to enable network egress metering for this cluster. If enabled, a DaemonSet is deployed
into the cluster to meter network egress traffic.

* `bigquery_destination` (Required) - Parameters for using BigQuery as the destination of resource usage export.

* `bigquery_destination.dataset_id` (Required) - The ID of a BigQuery dataset. For example:

```
resource_usage_export_config {
enable_network_egress_metering = false
bigquery_destination {
dataset_id = "cluster_resource_usage"
}
}
```
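
As a fuller sketch, the export config is typically paired with the BigQuery dataset that receives the usage records, mirroring the acceptance test added in this change; the resource names (`usage_metering`, `metered`) and the cluster/dataset values below are illustrative placeholders:

```
# Dataset that receives the exported usage records (illustrative names).
resource "google_bigquery_dataset" "usage_metering" {
  dataset_id                 = "cluster_resource_usage"
  delete_contents_on_destroy = true
}

resource "google_container_cluster" "metered" {
  name               = "metered-cluster"
  zone               = "us-central1-a"
  initial_node_count = 1

  resource_usage_export_config {
    enable_network_egress_metering = true

    bigquery_destination {
      # Send usage records to the dataset defined above.
      dataset_id = "${google_bigquery_dataset.usage_metering.dataset_id}"
    }
  }
}
```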

The `sandbox_type` block supports:

* `sandbox_type` (Required) Which sandbox to use for pods in the node pool.