From 0879a4c906fbd1a461b998fe86004982bd2309b7 Mon Sep 17 00:00:00 2001 From: The Magician Date: Wed, 27 Sep 2023 17:36:34 -0400 Subject: [PATCH] Merge pull request #9107 from GoogleCloudPlatform/main_sync (#16022) Signed-off-by: Modular Magician --- .changelog/9107.txt | 3 + .copywrite.hcl | 1 + META.d/_summary.yaml | 12 + META.d/links.yaml | 7 + google/provider/provider.go | 6 +- .../alloydb/resource_alloydb_backup.go | 345 +++++-- .../resource_alloydb_backup_generated_test.go | 5 +- .../alloydb/resource_alloydb_cluster.go | 99 ++ .../alloydb/resource_alloydb_instance.go | 167 +++ .../alloydb/resource_alloydb_instance_test.go | 15 +- .../resource_artifact_registry_repository.go | 276 ++++- ...fact_registry_repository_generated_test.go | 92 ++ .../bigquery/resource_bigquery_table.go | 17 +- .../bigquery/resource_bigquery_table_test.go | 179 ++++ .../bigtable/resource_bigtable_instance.go | 31 +- ...esource_bigtable_instance_internal_test.go | 243 ++++- ...pute_instance_network_interface_helpers.go | 1 + .../compute/resource_compute_router_nat.go | 22 +- .../compute/resource_compute_subnetwork.go | 3 +- google/services/container/node_config.go | 3 + .../container/resource_container_cluster.go | 13 +- .../resource_container_node_pool_test.go | 54 +- ...esource_monitoring_notification_channel.go | 14 +- .../resource_os_config_patch_deployment.go | 37 + ..._config_patch_deployment_generated_test.go | 1 + .../services/pubsub/resource_pubsub_schema.go | 6 +- .../resource_secret_manager_secret.go | 1 - .../resource_secret_manager_secret_test.go | 167 +++ .../resource_scc_folder_custom_module.go | 962 ++++++++++++++++++ ...source_scc_folder_custom_module_sweeper.go | 139 +++ .../resource_scc_folder_custom_module_test.go | 228 +++++ ...resource_scc_organization_custom_module.go | 962 ++++++++++++++++++ ..._scc_organization_custom_module_sweeper.go | 139 +++ ...rce_scc_organization_custom_module_test.go | 199 ++++ ...cc_project_custom_module_generated_test.go | 176 ---- 
...resource_scc_project_custom_module_test.go | 114 ++- .../sql/resource_sql_database_instance.go | 6 + .../resource_sql_database_instance_test.go | 1 + .../resource_storage_transfer_job.go | 80 +- .../resource_storage_transfer_job_test.go | 146 +++ .../tags/resource_tags_tag_binding.go | 8 + .../vertexai/resource_vertex_ai_index.go | 20 +- google/tpgresource/utils.go | 2 +- .../guides/version_5_upgrade.html.markdown | 209 +++- website/docs/r/alloydb_backup.html.markdown | 68 +- website/docs/r/alloydb_cluster.html.markdown | 17 + website/docs/r/alloydb_instance.html.markdown | 23 + ...artifact_registry_repository.html.markdown | 100 ++ .../docs/r/bigtable_instance.html.markdown | 1 + website/docs/r/compute_instance.html.markdown | 20 +- .../docs/r/compute_router_nat.html.markdown | 113 +- .../docs/r/compute_subnetwork.html.markdown | 29 +- .../docs/r/container_cluster.html.markdown | 4 +- website/docs/r/google_folder.html.markdown | 1 + .../os_config_patch_deployment.html.markdown | 5 + website/docs/r/pubsub_schema.html.markdown | 6 +- .../r/scc_folder_custom_module.html.markdown | 286 ++++++ ...c_organization_custom_module.html.markdown | 276 +++++ .../r/scc_project_custom_module.html.markdown | 98 +- .../r/sql_database_instance.html.markdown | 4 +- .../docs/r/storage_transfer_job.html.markdown | 14 +- website/docs/r/vertex_ai_index.html.markdown | 2 +- ...kstations_workstation_config.html.markdown | 14 + 63 files changed, 5865 insertions(+), 427 deletions(-) create mode 100644 .changelog/9107.txt create mode 100644 META.d/_summary.yaml create mode 100644 META.d/links.yaml create mode 100644 google/services/securitycenter/resource_scc_folder_custom_module.go create mode 100644 google/services/securitycenter/resource_scc_folder_custom_module_sweeper.go create mode 100644 google/services/securitycenter/resource_scc_folder_custom_module_test.go create mode 100644 google/services/securitycenter/resource_scc_organization_custom_module.go create mode 100644 
google/services/securitycenter/resource_scc_organization_custom_module_sweeper.go create mode 100644 google/services/securitycenter/resource_scc_organization_custom_module_test.go delete mode 100644 google/services/securitycenter/resource_scc_project_custom_module_generated_test.go create mode 100644 website/docs/r/scc_folder_custom_module.html.markdown create mode 100644 website/docs/r/scc_organization_custom_module.html.markdown diff --git a/.changelog/9107.txt b/.changelog/9107.txt new file mode 100644 index 00000000000..8ec013c0699 --- /dev/null +++ b/.changelog/9107.txt @@ -0,0 +1,3 @@ +```release-note:none + +``` diff --git a/.copywrite.hcl b/.copywrite.hcl index 1ae8f463038..b97fbad7f17 100644 --- a/.copywrite.hcl +++ b/.copywrite.hcl @@ -17,6 +17,7 @@ project { "scripts/**", "google/**/test-fixtures/**", "META.d/*.yml", + "META.d/*.yaml", ".golangci.yml", ".goreleaser.yml", ] diff --git a/META.d/_summary.yaml b/META.d/_summary.yaml new file mode 100644 index 00000000000..c3dc9c1febb --- /dev/null +++ b/META.d/_summary.yaml @@ -0,0 +1,12 @@ +--- + +schema: 1.1 + +partition: tf-ecosystem + +summary: + owner: team-tf-hybrid-cloud + description: | + The Terraform Google provider is a plugin that allows Terraform to manage resources on Google Cloud Platform. 
+ + visibility: external \ No newline at end of file diff --git a/META.d/links.yaml b/META.d/links.yaml new file mode 100644 index 00000000000..b15cd0fc248 --- /dev/null +++ b/META.d/links.yaml @@ -0,0 +1,7 @@ +runbooks: [] +#- name: +# link: + +other_links: [] +#- name: +# link: \ No newline at end of file diff --git a/google/provider/provider.go b/google/provider/provider.go index ad075b32850..20b099e1bb1 100644 --- a/google/provider/provider.go +++ b/google/provider/provider.go @@ -990,9 +990,9 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { }) } -// Generated resources: 319 +// Generated resources: 321 // Generated IAM resources: 207 -// Total generated resources: 526 +// Total generated resources: 528 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -1462,8 +1462,10 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_secret_manager_secret_iam_member": tpgiamresource.ResourceIamMember(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), "google_secret_manager_secret_iam_policy": tpgiamresource.ResourceIamPolicy(secretmanager.SecretManagerSecretIamSchema, secretmanager.SecretManagerSecretIamUpdaterProducer, secretmanager.SecretManagerSecretIdParseFunc), "google_secret_manager_secret_version": secretmanager.ResourceSecretManagerSecretVersion(), + "google_scc_folder_custom_module": securitycenter.ResourceSecurityCenterFolderCustomModule(), "google_scc_mute_config": securitycenter.ResourceSecurityCenterMuteConfig(), "google_scc_notification_config": securitycenter.ResourceSecurityCenterNotificationConfig(), + "google_scc_organization_custom_module": securitycenter.ResourceSecurityCenterOrganizationCustomModule(), "google_scc_project_custom_module": securitycenter.ResourceSecurityCenterProjectCustomModule(), "google_scc_source": 
securitycenter.ResourceSecurityCenterSource(), "google_scc_source_iam_binding": tpgiamresource.ResourceIamBinding(securitycenter.SecurityCenterSourceIamSchema, securitycenter.SecurityCenterSourceIamUpdaterProducer, securitycenter.SecurityCenterSourceIdParseFunc), diff --git a/google/services/alloydb/resource_alloydb_backup.go b/google/services/alloydb/resource_alloydb_backup.go index a1b907c7639..c06a14c3bdd 100644 --- a/google/services/alloydb/resource_alloydb_backup.go +++ b/google/services/alloydb/resource_alloydb_backup.go @@ -29,6 +29,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" ) func ResourceAlloydbBackup() *schema.Resource { @@ -49,7 +50,6 @@ func ResourceAlloydbBackup() *schema.Resource { }, CustomizeDiff: customdiff.All( - tpgresource.SetLabelsDiff, tpgresource.DefaultProviderProject, ), @@ -73,12 +73,23 @@ func ResourceAlloydbBackup() *schema.Resource { ForceNew: true, Description: `The location where the alloydb backup should reside.`, }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 +An object containing a list of "key": value pairs. 
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "description": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `User-provided description of the backup.`, }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `User-settable and human-readable display name for the Backup.`, + }, "encryption_config": { Type: schema.TypeList, Optional: true, @@ -96,24 +107,34 @@ func ResourceAlloydbBackup() *schema.Resource { }, }, "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `User-defined labels for the alloydb backup. - -**Note**: This field is non-authoritative, and will only manage the labels present in your configuration. -Please refer to the field 'effective_labels' for all of the labels present on the resource.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Description: `User-defined labels for the alloydb backup. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "create_time": { + "type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"TYPE_UNSPECIFIED", "ON_DEMAND", "AUTOMATED", "CONTINUOUS", ""}), + Description: `The backup type, which suggests the trigger for the backup. Possible values: ["TYPE_UNSPECIFIED", "ON_DEMAND", "AUTOMATED", "CONTINUOUS"]`, + }, + "cluster_uid": { Type: schema.TypeString, Computed: true, - Description: `Time the Backup was created in UTC.`, + Description: `Output only. 
The system-generated UID of the cluster which was used to create this resource.`, }, - "effective_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, - Elem: &schema.Schema{Type: schema.TypeString}, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Create time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Delete time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, }, "encryption_info": { Type: schema.TypeList, @@ -140,7 +161,33 @@ Please refer to the field 'effective_labels' for all of the labels present on th "etag": { Type: schema.TypeString, Computed: true, - Description: `A hash of the resource.`, + Description: `For Resource freshness validation (https://google.aip.dev/154)`, + }, + "expiry_quantity": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. The QuantityBasedExpiry of the backup, specified by the backup's retention policy. +Once the expiry quantity is over retention, the backup is eligible to be garbage collected.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_count": { + Type: schema.TypeInt, + Computed: true, + Description: `Output only. The backup's position among its backups with the same source cluster and type, by descending chronological order create time (i.e. newest first).`, + }, + "total_retention_count": { + Type: schema.TypeInt, + Computed: true, + Description: `Output only. 
The length of the quantity-based queue, specified by the backup's retention policy.`, + }, + }, + }, + }, + "expiry_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time at which after the backup is eligible to be garbage collected. +It is the duration specified by the backup's retention policy, added to the backup's createTime.`, }, "name": { Type: schema.TypeString, @@ -148,21 +195,20 @@ Please refer to the field 'effective_labels' for all of the labels present on th Description: `Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backupId}`, }, "reconciling": { - Type: schema.TypeBool, + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. +This can happen due to user-triggered updates or system actions like failover or maintenance.`, + }, + "size_bytes": { + Type: schema.TypeString, Computed: true, - Description: `If true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance.`, + Description: `Output only. The size of the backup in bytes.`, }, "state": { Type: schema.TypeString, Computed: true, - Description: `The current state of the backup.`, - }, - "terraform_labels": { - Type: schema.TypeMap, - Computed: true, - Description: `The combination of labels configured directly on the resource - and default labels configured on the provider.`, - Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Output only. The current state of the backup.`, }, "uid": { Type: schema.TypeString, @@ -170,9 +216,10 @@ Please refer to the field 'effective_labels' for all of the labels present on th Description: `Output only. The system-generated UID of the resource. 
The UID is assigned when the resource is created, and it is retained until it is deleted.`, }, "update_time": { - Type: schema.TypeString, - Computed: true, - Description: `Time the Backup was updated in UTC.`, + Type: schema.TypeString, + Computed: true, + Description: `Output only. Update time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, }, "project": { Type: schema.TypeString, @@ -193,11 +240,23 @@ func resourceAlloydbBackupCreate(d *schema.ResourceData, meta interface{}) error } obj := make(map[string]interface{}) - clusterNameProp, err := expandAlloydbBackupClusterName(d.Get("cluster_name"), d, config) + displayNameProp, err := expandAlloydbBackupDisplayName(d.Get("display_name"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("cluster_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(clusterNameProp)) && (ok || !reflect.DeepEqual(v, clusterNameProp)) { - obj["clusterName"] = clusterNameProp + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandAlloydbBackupType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp } descriptionProp, err := expandAlloydbBackupDescription(d.Get("description"), d, config) if err != nil { @@ -205,17 +264,23 @@ func resourceAlloydbBackupCreate(d 
*schema.ResourceData, meta interface{}) error } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { obj["description"] = descriptionProp } + clusterNameProp, err := expandAlloydbBackupClusterName(d.Get("cluster_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("cluster_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(clusterNameProp)) && (ok || !reflect.DeepEqual(v, clusterNameProp)) { + obj["clusterName"] = clusterNameProp + } encryptionConfigProp, err := expandAlloydbBackupEncryptionConfig(d.Get("encryption_config"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionConfigProp)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { obj["encryptionConfig"] = encryptionConfigProp } - labelsProp, err := expandAlloydbBackupEffectiveLabels(d.Get("effective_labels"), d, config) + annotationsProp, err := expandAlloydbBackupAnnotations(d.Get("annotations"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp } obj, err = resourceAlloydbBackupEncoder(d, meta, obj) @@ -320,31 +385,40 @@ func resourceAlloydbBackupRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("name", flattenAlloydbBackupName(res["name"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } + if err := d.Set("display_name", flattenAlloydbBackupDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + 
} if err := d.Set("uid", flattenAlloydbBackupUid(res["uid"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("cluster_name", flattenAlloydbBackupClusterName(res["clusterName"], d, config)); err != nil { + if err := d.Set("create_time", flattenAlloydbBackupCreateTime(res["createTime"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("labels", flattenAlloydbBackupLabels(res["labels"], d, config)); err != nil { + if err := d.Set("update_time", flattenAlloydbBackupUpdateTime(res["updateTime"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("create_time", flattenAlloydbBackupCreateTime(res["createTime"], d, config)); err != nil { + if err := d.Set("delete_time", flattenAlloydbBackupDeleteTime(res["deleteTime"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("update_time", flattenAlloydbBackupUpdateTime(res["updateTime"], d, config)); err != nil { + if err := d.Set("labels", flattenAlloydbBackupLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } if err := d.Set("state", flattenAlloydbBackupState(res["state"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } + if err := d.Set("type", flattenAlloydbBackupType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } if err := d.Set("description", flattenAlloydbBackupDescription(res["description"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("reconciling", flattenAlloydbBackupReconciling(res["reconciling"], d, config)); err != nil { + if err := d.Set("cluster_uid", flattenAlloydbBackupClusterUid(res["clusterUid"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("etag", flattenAlloydbBackupEtag(res["etag"], d, 
config)); err != nil { + if err := d.Set("cluster_name", flattenAlloydbBackupClusterName(res["clusterName"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("reconciling", flattenAlloydbBackupReconciling(res["reconciling"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } if err := d.Set("encryption_config", flattenAlloydbBackupEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { @@ -353,10 +427,19 @@ func resourceAlloydbBackupRead(d *schema.ResourceData, meta interface{}) error { if err := d.Set("encryption_info", flattenAlloydbBackupEncryptionInfo(res["encryptionInfo"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("terraform_labels", flattenAlloydbBackupTerraformLabels(res["labels"], d, config)); err != nil { + if err := d.Set("etag", flattenAlloydbBackupEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("annotations", flattenAlloydbBackupAnnotations(res["annotations"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("size_bytes", flattenAlloydbBackupSizeBytes(res["sizeBytes"], d, config)); err != nil { + return fmt.Errorf("Error reading Backup: %s", err) + } + if err := d.Set("expiry_time", flattenAlloydbBackupExpiryTime(res["expiryTime"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } - if err := d.Set("effective_labels", flattenAlloydbBackupEffectiveLabels(res["labels"], d, config)); err != nil { + if err := d.Set("expiry_quantity", flattenAlloydbBackupExpiryQuantity(res["expiryQuantity"], d, config)); err != nil { return fmt.Errorf("Error reading Backup: %s", err) } @@ -379,17 +462,41 @@ func resourceAlloydbBackupUpdate(d *schema.ResourceData, meta interface{}) error billingProject = project obj := make(map[string]interface{}) + displayNameProp, err := 
expandAlloydbBackupDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + labelsProp, err := expandAlloydbBackupLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + typeProp, err := expandAlloydbBackupType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + descriptionProp, err := expandAlloydbBackupDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } encryptionConfigProp, err := expandAlloydbBackupEncryptionConfig(d.Get("encryption_config"), d, config) if err != nil { return err } else if v, ok := d.GetOkExists("encryption_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, encryptionConfigProp)) { obj["encryptionConfig"] = encryptionConfigProp } - labelsProp, err := expandAlloydbBackupEffectiveLabels(d.Get("effective_labels"), d, config) + annotationsProp, err := expandAlloydbBackupAnnotations(d.Get("annotations"), d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, 
annotationsProp)) { + obj["annotations"] = annotationsProp } obj, err = resourceAlloydbBackupEncoder(d, meta, obj) @@ -405,12 +512,28 @@ func resourceAlloydbBackupUpdate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Updating Backup %q: %#v", d.Id(), obj) updateMask := []string{} + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("type") { + updateMask = append(updateMask, "type") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + if d.HasChange("encryption_config") { updateMask = append(updateMask, "encryptionConfig") } - if d.HasChange("effective_labels") { - updateMask = append(updateMask, "labels") + if d.HasChange("annotations") { + updateMask = append(updateMask, "annotations") } // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it @@ -528,34 +651,27 @@ func flattenAlloydbBackupName(v interface{}, d *schema.ResourceData, config *tra return v } -func flattenAlloydbBackupUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenAlloydbBackupDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAlloydbBackupClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenAlloydbBackupUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAlloydbBackupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - if v == nil { - return v - } - - transformed := make(map[string]interface{}) - if l, ok := d.GetOkExists("labels"); ok { - for k := range l.(map[string]interface{}) { - transformed[k] = v.(map[string]interface{})[k] - } - } +func flattenAlloydbBackupCreateTime(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} - return transformed +func flattenAlloydbBackupUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v } -func flattenAlloydbBackupCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenAlloydbBackupDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAlloydbBackupUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenAlloydbBackupLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -563,15 +679,23 @@ func flattenAlloydbBackupState(v interface{}, d *schema.ResourceData, config *tr return v } +func flattenAlloydbBackupType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenAlloydbBackupDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAlloydbBackupReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenAlloydbBackupClusterUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } -func flattenAlloydbBackupEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func flattenAlloydbBackupClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -615,26 +739,87 @@ func flattenAlloydbBackupEncryptionInfoKmsKeyVersions(v interface{}, d *schema.R return v } -func flattenAlloydbBackupTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { +func 
flattenAlloydbBackupEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupSizeBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupExpiryTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbBackupExpiryQuantity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { - return v + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil } - transformed := make(map[string]interface{}) - if l, ok := d.GetOkExists("terraform_labels"); ok { - for k := range l.(map[string]interface{}) { - transformed[k] = v.(map[string]interface{})[k] + transformed["retention_count"] = + flattenAlloydbBackupExpiryQuantityRetentionCount(original["retentionCount"], d, config) + transformed["total_retention_count"] = + flattenAlloydbBackupExpiryQuantityTotalRetentionCount(original["totalRetentionCount"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbBackupExpiryQuantityRetentionCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbBackupExpiryQuantityTotalRetentionCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + 
if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal } } - return transformed + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise } -func flattenAlloydbBackupEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - return v +func expandAlloydbBackupDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil } -func expandAlloydbBackupClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { +func expandAlloydbBackupLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandAlloydbBackupType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -642,6 +827,10 @@ func expandAlloydbBackupDescription(v interface{}, d tpgresource.TerraformResour return v, nil } +func expandAlloydbBackupClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandAlloydbBackupEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -665,7 +854,7 @@ func expandAlloydbBackupEncryptionConfigKmsKeyName(v interface{}, d tpgresource. 
return v, nil } -func expandAlloydbBackupEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { +func expandAlloydbBackupAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil } diff --git a/google/services/alloydb/resource_alloydb_backup_generated_test.go b/google/services/alloydb/resource_alloydb_backup_generated_test.go index f0aa7de3000..572bafce8fb 100644 --- a/google/services/alloydb/resource_alloydb_backup_generated_test.go +++ b/google/services/alloydb/resource_alloydb_backup_generated_test.go @@ -49,7 +49,7 @@ func TestAccAlloydbBackup_alloydbBackupBasicExample(t *testing.T) { ResourceName: "google_alloydb_backup.default", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time"}, }, }, }) @@ -118,7 +118,7 @@ func TestAccAlloydbBackup_alloydbBackupFullExample(t *testing.T) { ResourceName: "google_alloydb_backup.default", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"backup_id", "location", "reconciling", "update_time"}, }, }, }) @@ -132,6 +132,7 @@ resource "google_alloydb_backup" "default" { cluster_name = google_alloydb_cluster.default.name description = "example description" + type = "ON_DEMAND" labels = { "label" = "key" } diff --git a/google/services/alloydb/resource_alloydb_cluster.go b/google/services/alloydb/resource_alloydb_cluster.go index ddcc0d3dd33..0824ea08190 100644 --- a/google/services/alloydb/resource_alloydb_cluster.go +++ b/google/services/alloydb/resource_alloydb_cluster.go @@ -67,6 +67,13 @@ func 
ResourceAlloydbCluster() *schema.Resource { ForceNew: true, Description: `The location where the alloydb cluster should reside.`, }, + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: `Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 +An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "automated_backup_policy": { Type: schema.TypeList, Computed: true, @@ -267,6 +274,11 @@ If not set, defaults to 14 days.`, }, }, }, + "etag": { + Type: schema.TypeString, + Optional: true, + Description: `For Resource freshness validation (https://google.aip.dev/154)`, + }, "initial_user": { Type: schema.TypeList, Optional: true, @@ -500,6 +512,18 @@ It is specified in the form: "projects/{projectNumber}/global/networks/{network_ Computed: true, Description: `The name of the cluster resource.`, }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `Output only. Reconciling (https://google.aip.dev/128#reconciliation). +Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. +This can happen due to user-triggered updates or system actions like failover or maintenance.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. 
The current serving state of the cluster.`, + }, "terraform_labels": { Type: schema.TypeMap, Computed: true, @@ -555,6 +579,18 @@ func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } + etagProp, err := expandAlloydbClusterEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + annotationsProp, err := expandAlloydbClusterAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } initialUserProp, err := expandAlloydbClusterInitialUser(d.Get("initial_user"), d, config) if err != nil { return err @@ -746,6 +782,18 @@ func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("display_name", flattenAlloydbClusterDisplayName(res["displayName"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } + if err := d.Set("etag", flattenAlloydbClusterEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("reconciling", flattenAlloydbClusterReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("state", flattenAlloydbClusterState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Cluster: %s", err) + } + if err := d.Set("annotations", flattenAlloydbClusterAnnotations(res["annotations"], d, config)); err != nil { + return 
fmt.Errorf("Error reading Cluster: %s", err) + } if err := d.Set("database_version", flattenAlloydbClusterDatabaseVersion(res["databaseVersion"], d, config)); err != nil { return fmt.Errorf("Error reading Cluster: %s", err) } @@ -811,6 +859,18 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { obj["displayName"] = displayNameProp } + etagProp, err := expandAlloydbClusterEtag(d.Get("etag"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + annotationsProp, err := expandAlloydbClusterAnnotations(d.Get("annotations"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { + obj["annotations"] = annotationsProp + } initialUserProp, err := expandAlloydbClusterInitialUser(d.Get("initial_user"), d, config) if err != nil { return err @@ -860,6 +920,14 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro updateMask = append(updateMask, "displayName") } + if d.HasChange("etag") { + updateMask = append(updateMask, "etag") + } + + if d.HasChange("annotations") { + updateMask = append(updateMask, "annotations") + } + if d.HasChange("initial_user") { updateMask = append(updateMask, "initialUser") } @@ -1136,6 +1204,22 @@ func flattenAlloydbClusterDisplayName(v interface{}, d *schema.ResourceData, con return v } +func flattenAlloydbClusterEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenAlloydbClusterState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbClusterAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenAlloydbClusterDatabaseVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1541,6 +1625,21 @@ func expandAlloydbClusterDisplayName(v interface{}, d tpgresource.TerraformResou return v, nil } +func expandAlloydbClusterEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbClusterAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + func expandAlloydbClusterInitialUser(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/google/services/alloydb/resource_alloydb_instance.go b/google/services/alloydb/resource_alloydb_instance.go index 30f8d613228..7f746efc01d 100644 --- a/google/services/alloydb/resource_alloydb_instance.go +++ b/google/services/alloydb/resource_alloydb_instance.go @@ -136,6 +136,37 @@ Please refer to the field 'effective_labels' for all of the labels present on th }, }, }, + "query_insights_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Configuration for query insights.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_plans_per_minute": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of query execution plans captured by Insights per minute for all queries combined. 
The default value is 5. Any integer between 0 and 20 is considered valid.`, + }, + "query_string_length": { + Type: schema.TypeInt, + Optional: true, + Description: `Query string length. The default value is 1024. Any integer between 256 and 4500 is considered valid.`, + }, + "record_application_tags": { + Type: schema.TypeBool, + Optional: true, + Description: `Record application tags for an instance. This flag is turned "on" by default.`, + }, + "record_client_address": { + Type: schema.TypeBool, + Optional: true, + Description: `Record client address for an instance. Client address is PII information. This flag is turned "on" by default.`, + }, + }, + }, + }, "read_pool_config": { Type: schema.TypeList, Optional: true, @@ -249,6 +280,12 @@ func resourceAlloydbInstanceCreate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("instance_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceTypeProp)) && (ok || !reflect.DeepEqual(v, instanceTypeProp)) { obj["instanceType"] = instanceTypeProp } + queryInsightsConfigProp, err := expandAlloydbInstanceQueryInsightsConfig(d.Get("query_insights_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("query_insights_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(queryInsightsConfigProp)) && (ok || !reflect.DeepEqual(v, queryInsightsConfigProp)) { + obj["queryInsightsConfig"] = queryInsightsConfigProp + } readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) if err != nil { return err @@ -391,6 +428,9 @@ func resourceAlloydbInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("ip_address", flattenAlloydbInstanceIpAddress(res["ipAddress"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("query_insights_config", flattenAlloydbInstanceQueryInsightsConfig(res["queryInsightsConfig"], d, config)); err != nil { + return fmt.Errorf("Error 
reading Instance: %s", err) + } if err := d.Set("read_pool_config", flattenAlloydbInstanceReadPoolConfig(res["readPoolConfig"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -445,6 +485,12 @@ func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) err } else if v, ok := d.GetOkExists("availability_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, availabilityTypeProp)) { obj["availabilityType"] = availabilityTypeProp } + queryInsightsConfigProp, err := expandAlloydbInstanceQueryInsightsConfig(d.Get("query_insights_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("query_insights_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, queryInsightsConfigProp)) { + obj["queryInsightsConfig"] = queryInsightsConfigProp + } readPoolConfigProp, err := expandAlloydbInstanceReadPoolConfig(d.Get("read_pool_config"), d, config) if err != nil { return err @@ -494,6 +540,10 @@ func resourceAlloydbInstanceUpdate(d *schema.ResourceData, meta interface{}) err updateMask = append(updateMask, "availabilityType") } + if d.HasChange("query_insights_config") { + updateMask = append(updateMask, "queryInsightsConfig") + } + if d.HasChange("read_pool_config") { updateMask = append(updateMask, "readPoolConfig") } @@ -690,6 +740,67 @@ func flattenAlloydbInstanceIpAddress(v interface{}, d *schema.ResourceData, conf return v } +func flattenAlloydbInstanceQueryInsightsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["query_string_length"] = + flattenAlloydbInstanceQueryInsightsConfigQueryStringLength(original["queryStringLength"], d, config) + transformed["record_application_tags"] = + 
flattenAlloydbInstanceQueryInsightsConfigRecordApplicationTags(original["recordApplicationTags"], d, config) + transformed["record_client_address"] = + flattenAlloydbInstanceQueryInsightsConfigRecordClientAddress(original["recordClientAddress"], d, config) + transformed["query_plans_per_minute"] = + flattenAlloydbInstanceQueryInsightsConfigQueryPlansPerMinute(original["queryPlansPerMinute"], d, config) + return []interface{}{transformed} +} +func flattenAlloydbInstanceQueryInsightsConfigQueryStringLength(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenAlloydbInstanceQueryInsightsConfigRecordApplicationTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceQueryInsightsConfigRecordClientAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAlloydbInstanceQueryInsightsConfigQueryPlansPerMinute(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenAlloydbInstanceReadPoolConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -800,6 +911,62 @@ func 
expandAlloydbInstanceInstanceType(v interface{}, d tpgresource.TerraformRes return v, nil } +func expandAlloydbInstanceQueryInsightsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedQueryStringLength, err := expandAlloydbInstanceQueryInsightsConfigQueryStringLength(original["query_string_length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryStringLength); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryStringLength"] = transformedQueryStringLength + } + + transformedRecordApplicationTags, err := expandAlloydbInstanceQueryInsightsConfigRecordApplicationTags(original["record_application_tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecordApplicationTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recordApplicationTags"] = transformedRecordApplicationTags + } + + transformedRecordClientAddress, err := expandAlloydbInstanceQueryInsightsConfigRecordClientAddress(original["record_client_address"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecordClientAddress); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recordClientAddress"] = transformedRecordClientAddress + } + + transformedQueryPlansPerMinute, err := expandAlloydbInstanceQueryInsightsConfigQueryPlansPerMinute(original["query_plans_per_minute"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedQueryPlansPerMinute); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["queryPlansPerMinute"] = transformedQueryPlansPerMinute + } + + return transformed, nil +} + +func 
expandAlloydbInstanceQueryInsightsConfigQueryStringLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceQueryInsightsConfigRecordApplicationTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceQueryInsightsConfigRecordClientAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAlloydbInstanceQueryInsightsConfigQueryPlansPerMinute(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandAlloydbInstanceReadPoolConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/google/services/alloydb/resource_alloydb_instance_test.go b/google/services/alloydb/resource_alloydb_instance_test.go index 9cf9b4b05e8..73bc2b08c65 100644 --- a/google/services/alloydb/resource_alloydb_instance_test.go +++ b/google/services/alloydb/resource_alloydb_instance_test.go @@ -151,7 +151,7 @@ data "google_compute_network" "default" { } // This test passes if we are able to create a primary instance with maximum number of fields -func TestAccAlloydbInstance_createInstanceWithMaximumFields(t *testing.T) { +/* func TestAccAlloydbInstance_createInstanceWithMaximumFields(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -191,6 +191,13 @@ resource "google_alloydb_instance" "default" { machine_config { cpu_count = 4 } + query_insights_config { + query_string_length = 300 + record_application_tags = "false" + record_client_address = "true" + query_plans_per_minute = 10 + } + depends_on = [google_service_networking_connection.vpc_connection] lifecycle { ignore_changes = [ gce_zone, @@ 
-211,7 +218,7 @@ data "google_compute_network" "default" { name = "%{network_name}" } `, context) -} +}*/ // This test passes if we are able to create a primary instance with an associated read-pool instance func TestAccAlloydbInstance_createPrimaryAndReadPoolInstance(t *testing.T) { @@ -267,7 +274,7 @@ data "google_compute_network" "default" { } // This test passes if we are able to update a database flag in primary instance -func TestAccAlloydbInstance_updateDatabaseFlagInPrimaryInstance(t *testing.T) { +/*func TestAccAlloydbInstance_updateDatabaseFlagInPrimaryInstance(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -318,7 +325,7 @@ data "google_compute_network" "default" { name = "%{network_name}" } `, context) -} +}*/ func testAccAlloydbInstance_autoExplainDisabledInPrimaryInstance(context map[string]interface{}) string { return acctest.Nprintf(` diff --git a/google/services/artifactregistry/resource_artifact_registry_repository.go b/google/services/artifactregistry/resource_artifact_registry_repository.go index dbda5ed4c8e..8f84eededd9 100644 --- a/google/services/artifactregistry/resource_artifact_registry_repository.go +++ b/google/services/artifactregistry/resource_artifact_registry_repository.go @@ -165,6 +165,42 @@ snapshot versions.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "apt_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specific settings for an Apt remote repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `One of the publicly available Apt repositories supported by Artifact Registry.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository_base": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"DEBIAN", "UBUNTU"}), + 
Description: `A common public repository base for Apt, e.g. '"debian/dists/buster"' Possible values: ["DEBIAN", "UBUNTU"]`, + }, + "repository_path": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Specific repository from the base.`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"remote_repository_config.0.apt_repository", "remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository", "remote_repository_config.0.yum_repository"}, + }, "description": { Type: schema.TypeString, Optional: true, @@ -190,7 +226,7 @@ snapshot versions.`, }, }, }, - ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + ExactlyOneOf: []string{"remote_repository_config.0.apt_repository", "remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository", "remote_repository_config.0.yum_repository"}, }, "maven_repository": { Type: schema.TypeList, @@ -211,7 +247,7 @@ snapshot versions.`, }, }, }, - ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + ExactlyOneOf: []string{"remote_repository_config.0.apt_repository", "remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository", "remote_repository_config.0.yum_repository"}, }, "npm_repository": { Type: schema.TypeList, @@ -232,7 +268,7 @@ snapshot versions.`, }, }, }, - ExactlyOneOf: 
[]string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + ExactlyOneOf: []string{"remote_repository_config.0.apt_repository", "remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository", "remote_repository_config.0.yum_repository"}, }, "python_repository": { Type: schema.TypeList, @@ -253,7 +289,43 @@ snapshot versions.`, }, }, }, - ExactlyOneOf: []string{"remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository"}, + ExactlyOneOf: []string{"remote_repository_config.0.apt_repository", "remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository", "remote_repository_config.0.yum_repository"}, + }, + "yum_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specific settings for an Yum remote repository.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_repository": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `One of the publicly available Yum repositories supported by Artifact Registry.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "repository_base": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"CENTOS", "CENTOS_DEBUG", "CENTOS_VAULT", "CENTOS_STREAM", "ROCKY", "EPEL"}), + Description: `A common public repository base for Yum. 
Possible values: ["CENTOS", "CENTOS_DEBUG", "CENTOS_VAULT", "CENTOS_STREAM", "ROCKY", "EPEL"]`, + }, + "repository_path": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Specific repository from the base, e.g. '"8-stream/BaseOs/x86_64/os"'`, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"remote_repository_config.0.apt_repository", "remote_repository_config.0.docker_repository", "remote_repository_config.0.maven_repository", "remote_repository_config.0.npm_repository", "remote_repository_config.0.python_repository", "remote_repository_config.0.yum_repository"}, }, }, }, @@ -898,6 +970,8 @@ func flattenArtifactRegistryRepositoryRemoteRepositoryConfig(v interface{}, d *s transformed := make(map[string]interface{}) transformed["description"] = flattenArtifactRegistryRepositoryRemoteRepositoryConfigDescription(original["description"], d, config) + transformed["apt_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepository(original["aptRepository"], d, config) transformed["docker_repository"] = flattenArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(original["dockerRepository"], d, config) transformed["maven_repository"] = @@ -906,12 +980,50 @@ func flattenArtifactRegistryRepositoryRemoteRepositoryConfig(v interface{}, d *s flattenArtifactRegistryRepositoryRemoteRepositoryConfigNpmRepository(original["npmRepository"], d, config) transformed["python_repository"] = flattenArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepository(original["pythonRepository"], d, config) + transformed["yum_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepository(original["yumRepository"], d, config) return []interface{}{transformed} } func flattenArtifactRegistryRepositoryRemoteRepositoryConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepository(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepository(original["publicRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["repository_base"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryBase(original["repositoryBase"], d, config) + transformed["repository_path"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryPath(original["repositoryPath"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryBase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil @@ -980,6 +1092,42 @@ func flattenArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepositoryPubl return v } +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepository(v interface{}, d *schema.ResourceData, 
config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_repository"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepository(original["publicRepository"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepository(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["repository_base"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryBase(original["repositoryBase"], d, config) + transformed["repository_path"] = + flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryPath(original["repositoryPath"], d, config) + return []interface{}{transformed} +} +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryBase(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryPath(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenArtifactRegistryRepositoryTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -1160,6 +1308,13 @@ func expandArtifactRegistryRepositoryRemoteRepositoryConfig(v interface{}, d tpg transformed["description"] = transformedDescription } + transformedAptRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepository(original["apt_repository"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAptRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["aptRepository"] = transformedAptRepository + } + transformedDockerRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(original["docker_repository"], d, config) if err != nil { return nil, err @@ -1188,6 +1343,13 @@ func expandArtifactRegistryRepositoryRemoteRepositoryConfig(v interface{}, d tpg transformed["pythonRepository"] = transformedPythonRepository } + transformedYumRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepository(original["yum_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYumRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["yumRepository"] = transformedYumRepository + } + return transformed, nil } @@ -1195,6 +1357,59 @@ func expandArtifactRegistryRepositoryRemoteRepositoryConfigDescription(v interfa return v, nil } +func expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepository(original["public_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicRepository"] = transformedPublicRepository + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepository(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepositoryBase, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryBase(original["repository_base"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepositoryBase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repositoryBase"] = transformedRepositoryBase + } + + transformedRepositoryPath, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryPath(original["repository_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepositoryPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repositoryPath"] = transformedRepositoryPath + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryBase(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigAptRepositoryPublicRepositoryRepositoryPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandArtifactRegistryRepositoryRemoteRepositoryConfigDockerRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -1287,6 +1502,59 @@ func expandArtifactRegistryRepositoryRemoteRepositoryConfigPythonRepositoryPubli return v, nil } +func expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepository(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicRepository, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepository(original["public_repository"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicRepository); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicRepository"] = transformedPublicRepository + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepository(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRepositoryBase, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryBase(original["repository_base"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepositoryBase); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repositoryBase"] = transformedRepositoryBase + } + + transformedRepositoryPath, err := expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryPath(original["repository_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRepositoryPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["repositoryPath"] = transformedRepositoryPath + } + + return transformed, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryBase(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandArtifactRegistryRepositoryRemoteRepositoryConfigYumRepositoryPublicRepositoryRepositoryPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandArtifactRegistryRepositoryEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/google/services/artifactregistry/resource_artifact_registry_repository_generated_test.go b/google/services/artifactregistry/resource_artifact_registry_repository_generated_test.go index 4fab59739a2..35625b5ff02 100644 --- a/google/services/artifactregistry/resource_artifact_registry_repository_generated_test.go +++ b/google/services/artifactregistry/resource_artifact_registry_repository_generated_test.go @@ -250,6 +250,98 @@ resource "google_artifact_registry_repository" "my-repo" { `, context) } +func TestAccArtifactRegistryRepository_artifactRegistryRepositoryRemoteAptExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_artifactRegistryRepositoryRemoteAptExample(context), + }, + { + ResourceName: "google_artifact_registry_repository.my-repo", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"repository_id", "location", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccArtifactRegistryRepository_artifactRegistryRepositoryRemoteAptExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_artifact_registry_repository" "my-repo" { + location = "us-central1" + repository_id = "tf-test-debian-buster%{random_suffix}" + description = "example remote apt repository%{random_suffix}" + format = "APT" + mode = "REMOTE_REPOSITORY" + remote_repository_config { + description = "Debian buster remote repository" + apt_repository { + public_repository { + repository_base = "DEBIAN" + repository_path = "debian/dists/buster" + } + } + } +} +`, context) +} + +func TestAccArtifactRegistryRepository_artifactRegistryRepositoryRemoteYumExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_artifactRegistryRepositoryRemoteYumExample(context), + }, + { + ResourceName: "google_artifact_registry_repository.my-repo", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"repository_id", "location", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccArtifactRegistryRepository_artifactRegistryRepositoryRemoteYumExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_artifact_registry_repository" "my-repo" { + location = "us-central1" + repository_id = "tf-test-centos-8%{random_suffix}" + description = "example remote yum repository%{random_suffix}" + format = "YUM" + mode = "REMOTE_REPOSITORY" + remote_repository_config { + description = "Centos 8 remote repository" + yum_repository { + public_repository { + repository_base = "CENTOS" + repository_path = "8-stream/BaseOs/x86_64/os" + } + } + } +} +`, context) +} + func testAccCheckArtifactRegistryRepositoryDestroyProducer(t *testing.T) func(s *terraform.State) 
error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/google/services/bigquery/resource_bigquery_table.go b/google/services/bigquery/resource_bigquery_table.go index 82a72cc6cfe..be0d6175be4 100644 --- a/google/services/bigquery/resource_bigquery_table.go +++ b/google/services/bigquery/resource_bigquery_table.go @@ -191,10 +191,21 @@ func bigQueryTableConnectionIdSuppress(name, old, new string, _ *schema.Resource return false } - re := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/connections/(.+)") - if matches := re.FindStringSubmatch(new); matches != nil { - return old == matches[1]+"."+matches[2]+"."+matches[3] + // Old is in the dot format, and new is in the slash format. + // They represent the same connection if the project, locaition, and IDs are + // the same. + // Location should use a case-insenstive comparison. + dotRe := regexp.MustCompile(`(.+)\.(.+)\.(.+)`) + slashRe := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/connections/(.+)") + dotMatches := dotRe.FindStringSubmatch(old) + slashMatches := slashRe.FindStringSubmatch(new) + if dotMatches != nil && slashMatches != nil { + sameProject := dotMatches[1] == slashMatches[1] + sameLocation := strings.EqualFold(dotMatches[2], slashMatches[2]) + sameId := dotMatches[3] == slashMatches[3] + return sameProject && sameLocation && sameId } + return false } diff --git a/google/services/bigquery/resource_bigquery_table_test.go b/google/services/bigquery/resource_bigquery_table_test.go index 834ccbc3007..e6d6a536ba1 100644 --- a/google/services/bigquery/resource_bigquery_table_test.go +++ b/google/services/bigquery/resource_bigquery_table_test.go @@ -785,6 +785,131 @@ func TestAccBigQueryExternalDataTable_objectTable(t *testing.T) { }) } +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseNameReference(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := 
fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. + location := "US" + connection_id_reference := "google_bigquery_connection.test.name" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. 
+ location := "US" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsCentral1LowerCase(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. + location := "us-central1" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsEast1(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. 
+ location := "US-EAST1" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_EuropeWest8(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. + location := "EUROPE-WEST8" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + func TestAccBigQueryExternalDataTable_CSV(t *testing.T) { t.Parallel() @@ -2560,6 +2685,60 @@ resource "google_bigquery_table" "test" { `, datasetID, bucketName, manifestName, parquetFileName, tableID) } +func testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connectionIdReference string) string { + return fmt.Sprintf(` +resource "google_bigquery_connection" "test" { + connection_id = "%s" + 
location = "%s" + cloud_resource {} +} + +data "google_project" "project" {} + +resource "google_project_iam_member" "test" { + role = "roles/storage.objectViewer" + project = data.google_project.project.id + member = "serviceAccount:${google_bigquery_connection.test.cloud_resource[0].service_account_id}" +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + location = "%s" +} + +resource "google_storage_bucket" "test" { + name = "%s" + location = "%s" + force_destroy = true +} + +resource "google_storage_bucket_object" "test" { + name = "%s" + source = "./test-fixtures/test.parquet.gzip" + bucket = google_storage_bucket.test.name +} + +resource "google_bigquery_table" "test" { + deletion_protection = false + table_id = "%s" + dataset_id = google_bigquery_dataset.test.dataset_id + external_data_configuration { + + # Feature Under Test + connection_id = %s + + autodetect = false + object_metadata = "SIMPLE" + metadata_cache_mode = "MANUAL" + + source_uris = [ + "gs://${google_storage_bucket.test.name}/*", + ] + } +} +`, connectionID, location, datasetID, location, bucketName, location, objectName, tableID, connectionIdReference) +} + func testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness string) string { return fmt.Sprintf(` resource "google_bigquery_connection" "test" { diff --git a/google/services/bigtable/resource_bigtable_instance.go b/google/services/bigtable/resource_bigtable_instance.go index 7c501dedf56..3a049760ceb 100644 --- a/google/services/bigtable/resource_bigtable_instance.go +++ b/google/services/bigtable/resource_bigtable_instance.go @@ -131,6 +131,11 @@ func ResourceBigtableInstance() *schema.Resource { }, }, }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the cluster`, + }, }, }, }, @@ -445,6 +450,7 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} { "cluster_id": c.Name, "storage_type": storageType, 
"kms_key_name": c.KMSKeyName, + "state": c.State, } if c.AutoscalingConfig != nil { cluster["autoscaling_config"] = make([]map[string]interface{}, 1) @@ -570,6 +576,10 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res for i := 0; i < newCount.(int); i++ { _, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) clusterID := newId.(string) + // In case clusterID is empty, it is probably computed and this validation will be wrong. + if clusterID == "" { + continue + } if clusters[clusterID] { return fmt.Errorf("duplicated cluster_id: %q", clusterID) } @@ -586,7 +596,14 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res // This doesn't use the standard unordered list utility (https://github.com/GoogleCloudPlatform/magic-modules/blob/main/templates/terraform/unordered_list_customize_diff.erb) // because some fields can't be modified using the API and we recreate the instance // when they're changed. -func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { +func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + // separate func to allow unit testing + return resourceBigtableInstanceClusterReorderTypeListFunc(diff, func(orderedClusters []interface{}) error { + return diff.SetNew("cluster", orderedClusters) + }) + +} +func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.TerraformResourceDiff, setNew func([]interface{}) error) error { oldCount, newCount := diff.GetChange("cluster.#") // Simulate Required:true, MinItems:1 for "cluster". 
This doesn't work @@ -615,7 +632,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch for i := 0; i < newCount.(int); i++ { _, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) _, c := diff.GetChange(fmt.Sprintf("cluster.%d", i)) - clusters[newId.(string)] = c + typedCluster := c.(map[string]interface{}) + typedCluster["state"] = "READY" + clusters[newId.(string)] = typedCluster } // create a list of clusters using the old order when possible to minimise @@ -651,9 +670,8 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch } } - err := diff.SetNew("cluster", orderedClusters) - if err != nil { - return fmt.Errorf("Error setting cluster diff: %s", err) + if err := setNew(orderedClusters); err != nil { + return err } // Clusters can't have their zone, storage_type or kms_key_name updated, @@ -679,8 +697,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch } } + currentState, _ := diff.GetChange(fmt.Sprintf("cluster.%d.state", i)) oST, nST := diff.GetChange(fmt.Sprintf("cluster.%d.storage_type", i)) - if oST != nST { + if oST != nST && currentState.(string) != "CREATING" { err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i)) if err != nil { return fmt.Errorf("Error setting cluster diff: %s", err) diff --git a/google/services/bigtable/resource_bigtable_instance_internal_test.go b/google/services/bigtable/resource_bigtable_instance_internal_test.go index 51a2fb29307..dc0731f253a 100644 --- a/google/services/bigtable/resource_bigtable_instance_internal_test.go +++ b/google/services/bigtable/resource_bigtable_instance_internal_test.go @@ -10,9 +10,10 @@ import ( "cloud.google.com/go/bigtable" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" ) -func TestGetUnavailableClusterZones(t *testing.T) { +func TestUnitBigtable_getUnavailableClusterZones(t *testing.T) { cases := 
map[string]struct { clusterZones []string unavailableZones []string @@ -56,7 +57,7 @@ func TestGetUnavailableClusterZones(t *testing.T) { } } -func TestGetInstanceFromResponse(t *testing.T) { +func TestUnitBigtable_getInstanceFromResponse(t *testing.T) { instanceName := "test-instance" originalId := "original_value" cases := map[string]struct { @@ -132,3 +133,241 @@ func TestGetInstanceFromResponse(t *testing.T) { } } } + +func TestUnitBigtable_flattenBigtableCluster(t *testing.T) { + cases := map[string]struct { + clusterInfo *bigtable.ClusterInfo + want map[string]interface{} + }{ + "SSD auto scaling": { + clusterInfo: &bigtable.ClusterInfo{ + StorageType: bigtable.SSD, + Zone: "zone1", + ServeNodes: 5, + Name: "ssd-cluster", + KMSKeyName: "KMS", + State: "CREATING", + AutoscalingConfig: &bigtable.AutoscalingConfig{ + MinNodes: 3, + MaxNodes: 7, + CPUTargetPercent: 50, + StorageUtilizationPerNode: 60, + }, + }, + want: map[string]interface{}{ + "zone": "zone1", + "num_nodes": 5, + "cluster_id": "ssd-cluster", + "storage_type": "SSD", + "kms_key_name": "KMS", + "state": "CREATING", + "autoscaling_config": []map[string]interface{}{ + map[string]interface{}{ + "min_nodes": 3, + "max_nodes": 7, + "cpu_target": 50, + "storage_target": 60, + }, + }, + }, + }, + "HDD manual scaling": { + clusterInfo: &bigtable.ClusterInfo{ + StorageType: bigtable.HDD, + Zone: "zone2", + ServeNodes: 7, + Name: "hdd-cluster", + KMSKeyName: "KMS", + State: "READY", + }, + want: map[string]interface{}{ + "zone": "zone2", + "num_nodes": 7, + "cluster_id": "hdd-cluster", + "storage_type": "HDD", + "kms_key_name": "KMS", + "state": "READY", + }, + }, + } + + for tn, tc := range cases { + if got := flattenBigtableCluster(tc.clusterInfo); !reflect.DeepEqual(got, tc.want) { + t.Errorf("bad: %s, got %q, want %q", tn, got, tc.want) + } + } +} + +func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error(t *testing.T) { + d := &tpgresource.ResourceDiffMock{ + After: 
map[string]interface{}{ + "cluster.#": 0, + }, + } + if err := resourceBigtableInstanceClusterReorderTypeListFunc(d, nil); err == nil { + t.Errorf("expected error, got success") + } +} + +func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc(t *testing.T) { + cases := map[string]struct { + before map[string]interface{} + after map[string]interface{} + wantClusterOrder []string + wantForceNew bool + }{ + "create": { + before: map[string]interface{}{ + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + }, + }, + wantClusterOrder: []string{}, + wantForceNew: false, + }, + "no force new change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 4, + "cluster.0.cluster_id": "some-id-a", + "cluster.1.cluster_id": "some-id-b", + "cluster.2.cluster_id": "some-id-c", + "cluster.3.cluster_id": "some-id-e", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 3, + "cluster.0.cluster_id": "some-id-c", + "cluster.1.cluster_id": "some-id-a", + "cluster.2.cluster_id": "some-id-d", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-c", + }, + "cluster.1": map[string]interface{}{ + "cluster_id": "some-id-a", + }, + "cluster.2": map[string]interface{}{ + "cluster_id": "some-id-d", + }, + }, + wantClusterOrder: []string{"some-id-a", "some-id-d", "some-id-c"}, + wantForceNew: false, + }, + "force new - zone change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.zone": "zone-a", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.zone": "zone-b", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "zone": "zone-b", + }, + }, + 
wantClusterOrder: []string{"some-id-a"}, + wantForceNew: true, + }, + "force new - kms_key_name change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.kms_key_name": "key-a", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.kms_key_name": "key-b", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "kms_key_name": "key-b", + }, + }, + wantClusterOrder: []string{"some-id-a"}, + wantForceNew: true, + }, + "force new - storage_type change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "HDD", + "cluster.0.state": "READY", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "SSD", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "storage_type": "SSD", + }, + }, + wantClusterOrder: []string{"some-id-a"}, + wantForceNew: true, + }, + "skip force new - storage_type change for CREATING cluster": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "SSD", + "cluster.0.state": "CREATING", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "HDD", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "storage_type": "HDD", + }, + }, + wantClusterOrder: []string{"some-id-a"}, + wantForceNew: false, + }, + } + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + d := &tpgresource.ResourceDiffMock{ + Before: tc.before, + After: tc.after, + } + var clusters []interface{} + err := resourceBigtableInstanceClusterReorderTypeListFunc(d, func(gotClusters []interface{}) error { + clusters = 
gotClusters + return nil + }) + if err != nil { + t.Fatalf("bad: %s, error: %v", tn, err) + } + if d.IsForceNew != tc.wantForceNew { + t.Errorf("bad: %s, got %v, want %v", tn, d.IsForceNew, tc.wantForceNew) + } + gotClusterOrder := []string{} + for _, cluster := range clusters { + clusterResource := cluster.(map[string]interface{}) + gotClusterOrder = append(gotClusterOrder, clusterResource["cluster_id"].(string)) + } + if !reflect.DeepEqual(gotClusterOrder, tc.wantClusterOrder) { + t.Errorf("bad: %s, got %q, want %q", tn, gotClusterOrder, tc.wantClusterOrder) + } + }) + } +} diff --git a/google/services/compute/compute_instance_network_interface_helpers.go b/google/services/compute/compute_instance_network_interface_helpers.go index dc27f81d596..b0eeecc98a6 100644 --- a/google/services/compute/compute_instance_network_interface_helpers.go +++ b/google/services/compute/compute_instance_network_interface_helpers.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/compute/v1" ) diff --git a/google/services/compute/resource_compute_router_nat.go b/google/services/compute/resource_compute_router_nat.go index 0d00f5a25df..099eec71de7 100644 --- a/google/services/compute/resource_compute_router_nat.go +++ b/google/services/compute/resource_compute_router_nat.go @@ -134,6 +134,8 @@ func computeRouterNatRulesHash(v interface{}) int { sourceNatActiveIpHash := 0 sourceNatDrainIpHash := 0 + routerNatRulesHash := 0 + if obj["action"] != nil { actions := obj["action"].([]interface{}) if len(actions) != 0 && actions[0] != nil { @@ -156,10 +158,12 @@ func computeRouterNatRulesHash(v interface{}) int { sourceNatDrainIpHash += schema.HashString(sourceNatDrainIpStr) } } + } } - return ruleNumber + descriptionHash + schema.HashString(match) + sourceNatActiveIpHash + sourceNatDrainIpHash + routerNatRulesHash = 
ruleNumber + descriptionHash + schema.HashString(match) + sourceNatActiveIpHash + sourceNatDrainIpHash + return routerNatRulesHash } func ResourceComputeRouterNat() *schema.Resource { @@ -192,14 +196,6 @@ func ResourceComputeRouterNat() *schema.Resource { ValidateFunc: verify.ValidateRFC1035Name(2, 63), Description: `Name of the NAT service. The name must be 1-63 characters long and comply with RFC1035.`, - }, - "nat_ip_allocate_option": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidateEnum([]string{"MANUAL_ONLY", "AUTO_ONLY"}), - Description: `How external IPs should be allocated for this NAT. Valid values are -'AUTO_ONLY' for only allowing NAT IPs allocated by Google Cloud -Platform, or 'MANUAL_ONLY' for only user-allocated NAT IP addresses. Possible values: ["MANUAL_ONLY", "AUTO_ONLY"]`, }, "router": { Type: schema.TypeString, @@ -291,6 +287,14 @@ This field can only be set when enableDynamicPortAllocation is enabled.`, Optional: true, Description: `Minimum number of ports allocated to a VM from this NAT.`, }, + "nat_ip_allocate_option": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"MANUAL_ONLY", "AUTO_ONLY", ""}), + Description: `How external IPs should be allocated for this NAT. Valid values are +'AUTO_ONLY' for only allowing NAT IPs allocated by Google Cloud +Platform, or 'MANUAL_ONLY' for only user-allocated NAT IP addresses. 
Possible values: ["MANUAL_ONLY", "AUTO_ONLY"]`, + }, "nat_ips": { Type: schema.TypeSet, Optional: true, diff --git a/google/services/compute/resource_compute_subnetwork.go b/google/services/compute/resource_compute_subnetwork.go index 65f0d604e45..1a5c670f736 100644 --- a/google/services/compute/resource_compute_subnetwork.go +++ b/google/services/compute/resource_compute_subnetwork.go @@ -206,10 +206,11 @@ access Google APIs and services by using Private Google Access.`, Computed: true, Optional: true, ForceNew: true, - Description: `The purpose of the resource. This field can be either 'PRIVATE_RFC_1918', 'REGIONAL_MANAGED_PROXY', 'GLOBAL_MANAGED_PROXY', or 'PRIVATE_SERVICE_CONNECT'. + Description: `The purpose of the resource. This field can be either 'PRIVATE_RFC_1918', 'REGIONAL_MANAGED_PROXY', 'GLOBAL_MANAGED_PROXY', 'PRIVATE_SERVICE_CONNECT' or 'PRIVATE_NAT'([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)). A subnet with purpose set to 'REGIONAL_MANAGED_PROXY' is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to 'GLOBAL_MANAGED_PROXY' is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to 'PRIVATE_SERVICE_CONNECT' reserves the subnet for hosting a Private Service Connect published service. +A subnetwork with purpose set to 'PRIVATE_NAT' is used as source range for Private NAT gateways. Note that 'REGIONAL_MANAGED_PROXY' is the preferred setting for all regional Envoy load balancers. 
If unspecified, the purpose defaults to 'PRIVATE_RFC_1918'.`, }, diff --git a/google/services/container/node_config.go b/google/services/container/node_config.go index c32a59b9c5b..19587937115 100644 --- a/google/services/container/node_config.go +++ b/google/services/container/node_config.go @@ -946,6 +946,9 @@ func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig { if len(ls) == 0 { return nil } + if ls[0] == nil { + return &container.LinuxNodeConfig{} + } cfg := ls[0].(map[string]interface{}) sysCfgRaw, ok := cfg["sysctls"] if !ok { diff --git a/google/services/container/resource_container_cluster.go b/google/services/container/resource_container_cluster.go index cb78b13c70d..8f4a946ac12 100644 --- a/google/services/container/resource_container_cluster.go +++ b/google/services/container/resource_container_cluster.go @@ -408,13 +408,12 @@ func ResourceContainerCluster() *schema.Resource { }, }, "gcs_fuse_csi_driver_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the GCS Fuse CSI driver addon, which allows the usage of gcs bucket as volumes. Defaults to disabled; set enabled = true to enable.`, - ConflictsWith: []string{"enable_autopilot"}, + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the GCS Fuse CSI driver addon, which allows the usage of gcs bucket as volumes. 
Defaults to disabled; set enabled = true to enable.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { diff --git a/google/services/container/resource_container_node_pool_test.go b/google/services/container/resource_container_node_pool_test.go index c10ec141668..6f2a33209d2 100644 --- a/google/services/container/resource_container_node_pool_test.go +++ b/google/services/container/resource_container_node_pool_test.go @@ -411,8 +411,17 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ + // Create a node pool with empty `linux_node_config.sysctls`. { - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, 10000, 12800, "1000 20000 100000", 1), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, ""), + }, + { + ResourceName: "google_container_node_pool.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 100000"), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -421,7 +430,7 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { }, // Perform an update. 
{ - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, 10000, 12800, "1000 20000 200000", 1), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 200000"), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -2389,7 +2398,30 @@ resource "google_container_node_pool" "with_kubelet_config" { `, cluster, np, policy, quota, period, podPidsLimit) } -func testAccContainerNodePool_withLinuxNodeConfig(cluster, np string, maxBacklog, soMaxConn int, tcpMem string, twReuse int) string { +func testAccContainerNodePool_withLinuxNodeConfig(cluster, np string, tcpMem string) string { + linuxNodeConfig := ` + linux_node_config { + sysctls = {} + } +` + if len(tcpMem) != 0 { + linuxNodeConfig = fmt.Sprintf(` + linux_node_config { + sysctls = { + "net.core.netdev_max_backlog" = "10000" + "net.core.rmem_max" = 10000 + "net.core.wmem_default" = 10000 + "net.core.wmem_max" = 20000 + "net.core.optmem_max" = 10000 + "net.core.somaxconn" = 12800 + "net.ipv4.tcp_rmem" = "%s" + "net.ipv4.tcp_wmem" = "%s" + "net.ipv4.tcp_tw_reuse" = 1 + } + } +`, tcpMem, tcpMem) + } + return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2410,26 +2442,14 @@ resource "google_container_node_pool" "with_linux_node_config" { initial_node_count = 1 node_config { image_type = "COS_CONTAINERD" - linux_node_config { - sysctls = { - "net.core.netdev_max_backlog" = "%d" - "net.core.rmem_max" = 10000 - "net.core.wmem_default" = 10000 - "net.core.wmem_max" = 20000 - "net.core.optmem_max" = 10000 - "net.core.somaxconn" = %d - "net.ipv4.tcp_rmem" = "%s" - "net.ipv4.tcp_wmem" = "%s" - "net.ipv4.tcp_tw_reuse" = %d - } - } + %s oauth_scopes = [ "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", ] } } -`, cluster, np, maxBacklog, soMaxConn, tcpMem, tcpMem, twReuse) +`, cluster, np, linuxNodeConfig) } func testAccContainerNodePool_withNetworkConfig(cluster, np, network 
string) string { diff --git a/google/services/monitoring/resource_monitoring_notification_channel.go b/google/services/monitoring/resource_monitoring_notification_channel.go index f2c28f2122a..9a72b272f8e 100644 --- a/google/services/monitoring/resource_monitoring_notification_channel.go +++ b/google/services/monitoring/resource_monitoring_notification_channel.go @@ -22,6 +22,7 @@ import ( "fmt" "log" "reflect" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -541,10 +542,21 @@ func resourceMonitoringNotificationChannelImport(d *schema.ResourceData, meta in config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value - if err := tpgresource.ParseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { + if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { return nil, err } + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) < 2 { + return nil, fmt.Errorf( + "Could not split project from name: %s", + d.Get("name"), + ) + } + + if err := d.Set("project", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } return []*schema.ResourceData{d}, nil } diff --git a/google/services/osconfig/resource_os_config_patch_deployment.go b/google/services/osconfig/resource_os_config_patch_deployment.go index 5851dbeee6b..813176582d3 100644 --- a/google/services/osconfig/resource_os_config_patch_deployment.go +++ b/google/services/osconfig/resource_os_config_patch_deployment.go @@ -823,6 +823,13 @@ will not run in February, April, June, etc.`, ValidateFunc: validation.IntBetween(-1, 4), Description: `Week number in a month. 1-4 indicates the 1st to 4th week of the month. 
-1 indicates the last week of the month.`, }, + "day_offset": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(-30, 30), + Description: `Represents the number of days before or after the given week day of month that the patch deployment is scheduled for.`, + }, }, }, ExactlyOneOf: []string{"recurring_schedule.0.monthly.0.week_day_of_month", "recurring_schedule.0.monthly.0.month_day"}, @@ -1982,6 +1989,8 @@ func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v inte flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(original["weekOrdinal"], d, config) transformed["day_of_week"] = flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(original["dayOfWeek"], d, config) + transformed["day_offset"] = + flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOffset(original["dayOffset"], d, config) return []interface{}{transformed} } func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -2005,6 +2014,23 @@ func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWe return v } +func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOffset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // Handles the string fixed64 format if strVal, ok := 
v.(string); ok { @@ -3233,6 +3259,13 @@ func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v inter transformed["dayOfWeek"] = transformedDayOfWeek } + transformedDayOffset, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOffset(original["day_offset"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDayOffset); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dayOffset"] = transformedDayOffset + } + return transformed, nil } @@ -3244,6 +3277,10 @@ func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWee return v, nil } +func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOffset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/google/services/osconfig/resource_os_config_patch_deployment_generated_test.go b/google/services/osconfig/resource_os_config_patch_deployment_generated_test.go index ef1cad74ee0..5a13e9ea6f8 100644 --- a/google/services/osconfig/resource_os_config_patch_deployment_generated_test.go +++ b/google/services/osconfig/resource_os_config_patch_deployment_generated_test.go @@ -383,6 +383,7 @@ resource "google_os_config_patch_deployment" "patch" { week_day_of_month { week_ordinal = -1 day_of_week = "TUESDAY" + day_offset = 3 } } } diff --git a/google/services/pubsub/resource_pubsub_schema.go b/google/services/pubsub/resource_pubsub_schema.go index d7184229e54..f010f4250d2 100644 --- a/google/services/pubsub/resource_pubsub_schema.go +++ b/google/services/pubsub/resource_pubsub_schema.go @@ -65,7 +65,11 @@ func ResourcePubsubSchema() *schema.Resource { Optional: true, Description: `The definition of the schema. 
This should contain a string representing the full definition of the schema -that is a valid schema definition of the type specified in type.`, +that is a valid schema definition of the type specified in type. Changes +to the definition commit new [schema revisions](https://cloud.google.com/pubsub/docs/commit-schema-revision). +A schema can only have up to 20 revisions, so updates that fail with an +error indicating that the limit has been reached require manually +[deleting old revisions](https://cloud.google.com/pubsub/docs/delete-schema-revision).`, }, "type": { Type: schema.TypeString, diff --git a/google/services/secretmanager/resource_secret_manager_secret.go b/google/services/secretmanager/resource_secret_manager_secret.go index 1d6e2f4e1c1..3f5d7f8b1bc 100644 --- a/google/services/secretmanager/resource_secret_manager_secret.go +++ b/google/services/secretmanager/resource_secret_manager_secret.go @@ -238,7 +238,6 @@ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to n "rotation_period": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: `The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). If rotationPeriod is set, 'next_rotation_time' must be set. 
'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications.`, }, diff --git a/google/services/secretmanager/resource_secret_manager_secret_test.go b/google/services/secretmanager/resource_secret_manager_secret_test.go index 24b0a3271bf..0ce2e483007 100644 --- a/google/services/secretmanager/resource_secret_manager_secret_test.go +++ b/google/services/secretmanager/resource_secret_manager_secret_test.go @@ -275,6 +275,59 @@ func TestAccSecretManagerSecret_automaticCmekUpdate(t *testing.T) { }) } +func TestAccSecretManagerSecret_rotationPeriodUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "timestamp": "2122-11-26T19:58:16Z", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_withoutRotationPeriod(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_rotationPeriodBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_rotationPeriodUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_withoutRotationPeriod(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + }, + }) +} + func 
testAccSecretManagerSecret_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_secret_manager_secret" "secret-basic" { @@ -793,3 +846,117 @@ resource "google_secret_manager_secret" "secret-basic" { } `, context) } + +func testAccSecretManagerSecret_withoutRotationPeriod(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} + +func testAccSecretManagerSecret_rotationPeriodBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + topics { + name = google_pubsub_topic.topic.id + } + + rotation { + rotation_period = "3600s" + next_rotation_time = "%{timestamp}" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} 
+`, context) +} + +func testAccSecretManagerSecret_rotationPeriodUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + topics { + name = google_pubsub_topic.topic.id + } + + rotation { + rotation_period = "3700s" + next_rotation_time = "%{timestamp}" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} diff --git a/google/services/securitycenter/resource_scc_folder_custom_module.go b/google/services/securitycenter/resource_scc_folder_custom_module.go new file mode 100644 index 00000000000..e16909a5c55 --- /dev/null +++ b/google/services/securitycenter/resource_scc_folder_custom_module.go @@ -0,0 +1,962 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceSecurityCenterFolderCustomModule() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterFolderCustomModuleCreate, + Read: resourceSecurityCenterFolderCustomModuleRead, + Update: resourceSecurityCenterFolderCustomModuleUpdate, + Delete: resourceSecurityCenterFolderCustomModuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterFolderCustomModuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "custom_config": { + Type: schema.TypeList, + Required: true, + Description: `The user specified custom configuration for the module.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predicate": { + Type: schema.TypeList, + Required: true, + Description: `The CEL expression to evaluate to produce findings. When the expression evaluates +to true against a resource, a finding is generated.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the +expression, e.g. 
when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a +file name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. This can +be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + "recommendation": { + Type: schema.TypeString, + Required: true, + Description: `An explanation of the recommended steps that security teams can take to resolve +the detected issue. This explanation is returned with each finding generated by +this module in the nextSteps property of the finding JSON.`, + }, + "resource_selector": { + Type: schema.TypeList, + Required: true, + Description: `The resource types that the custom module operates on. Each custom module +can specify up to 5 resource types.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_types": { + Type: schema.TypeList, + Required: true, + Description: `The resource types to run the detector on.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "severity": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"CRITICAL", "HIGH", "MEDIUM", "LOW"}), + Description: `The severity to assign to findings generated by the module. 
Possible values: ["CRITICAL", "HIGH", "MEDIUM", "LOW"]`, + }, + "custom_output": { + Type: schema.TypeList, + Optional: true, + Description: `Custom output properties.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "properties": { + Type: schema.TypeList, + Optional: true, + Description: `A list of custom output properties to add to the finding.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the property for the custom output.`, + }, + "value_expression": { + Type: schema.TypeList, + Optional: true, + Description: `The CEL expression for the custom output. A resource property can be specified +to return the value of the property or a text string enclosed in quotation marks.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the +expression, e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a +file name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. This can +be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Text that describes the vulnerability or misconfiguration that the custom +module detects. 
This explanation is returned with each finding instance to +help investigators understand the detected issue. The text must be enclosed in quotation marks.`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][\w_]{0,127}$`), + Description: `The display name of the Security Health Analytics custom module. This +display name becomes the finding category for all findings that are +returned by this custom module. The display name must be between 1 and +128 characters, start with a lowercase letter, and contain alphanumeric +characters or underscores only.`, + }, + "enablement_state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "DISABLED"}), + Description: `The enablement state of the custom module. Possible values: ["ENABLED", "DISABLED"]`, + }, + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Numerical ID of the parent folder.`, + }, + "ancestor_module": { + Type: schema.TypeString, + Computed: true, + Description: `If empty, indicates that the custom module was created in the organization, folder, +or project in which you are viewing the custom module. Otherwise, ancestor_module +specifies the organization or folder from which the custom module is inherited.`, + }, + "last_editor": { + Type: schema.TypeString, + Computed: true, + Description: `The editor that last updated the custom module.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the custom module. Its format is "folders/{folder_id}/securityHealthAnalyticsSettings/customModules/{customModule}". +The id {customModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the custom module was last updated. 
+ +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and +up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterFolderCustomModuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandSecurityCenterFolderCustomModuleDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enablementStateProp, err := expandSecurityCenterFolderCustomModuleEnablementState(d.Get("enablement_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enablement_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(enablementStateProp)) && (ok || !reflect.DeepEqual(v, enablementStateProp)) { + obj["enablementState"] = enablementStateProp + } + customConfigProp, err := expandSecurityCenterFolderCustomModuleCustomConfig(d.Get("custom_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(customConfigProp)) && (ok || !reflect.DeepEqual(v, customConfigProp)) { + obj["customConfig"] = customConfigProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/securityHealthAnalyticsSettings/customModules") + 
if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new FolderCustomModule: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating FolderCustomModule: %s", err) + } + if err := d.Set("name", flattenSecurityCenterFolderCustomModuleName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating FolderCustomModule %q: %#v", d.Id(), res) + + return resourceSecurityCenterFolderCustomModuleRead(d, meta) +} + +func resourceSecurityCenterFolderCustomModuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + 
UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterFolderCustomModule %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterFolderCustomModuleName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + if err := d.Set("display_name", flattenSecurityCenterFolderCustomModuleDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + if err := d.Set("enablement_state", flattenSecurityCenterFolderCustomModuleEnablementState(res["enablementState"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterFolderCustomModuleUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + if err := d.Set("last_editor", flattenSecurityCenterFolderCustomModuleLastEditor(res["lastEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + if err := d.Set("ancestor_module", flattenSecurityCenterFolderCustomModuleAncestorModule(res["ancestorModule"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + if err := d.Set("custom_config", flattenSecurityCenterFolderCustomModuleCustomConfig(res["customConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading FolderCustomModule: %s", err) + } + + return nil +} + +func resourceSecurityCenterFolderCustomModuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + enablementStateProp, err := 
expandSecurityCenterFolderCustomModuleEnablementState(d.Get("enablement_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enablement_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enablementStateProp)) { + obj["enablementState"] = enablementStateProp + } + customConfigProp, err := expandSecurityCenterFolderCustomModuleCustomConfig(d.Get("custom_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customConfigProp)) { + obj["customConfig"] = customConfigProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating FolderCustomModule %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("enablement_state") { + updateMask = append(updateMask, "enablementState") + } + + if d.HasChange("custom_config") { + updateMask = append(updateMask, "customConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + 
Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating FolderCustomModule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating FolderCustomModule %q: %#v", d.Id(), res) + } + + return resourceSecurityCenterFolderCustomModuleRead(d, meta) +} + +func resourceSecurityCenterFolderCustomModuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting FolderCustomModule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "FolderCustomModule") + } + + log.Printf("[DEBUG] Finished deleting FolderCustomModule %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterFolderCustomModuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + 
"^folders/(?P<folder>[^/]+)/securityHealthAnalyticsSettings/customModules/(?P<name>[^/]+)$",
+		"^(?P<folder>[^/]+)/(?P<name>[^/]+)$",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenSecurityCenterFolderCustomModuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return v
+	}
+	return tpgresource.NameFromSelfLinkStateFunc(v)
+}
+
+func flattenSecurityCenterFolderCustomModuleDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterFolderCustomModuleEnablementState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterFolderCustomModuleUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterFolderCustomModuleLastEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterFolderCustomModuleAncestorModule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenSecurityCenterFolderCustomModuleCustomConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["predicate"] =
+		flattenSecurityCenterFolderCustomModuleCustomConfigPredicate(original["predicate"], d, config)
+	transformed["custom_output"] =
flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutput(original["customOutput"], d, config) + transformed["resource_selector"] = + flattenSecurityCenterFolderCustomModuleCustomConfigResourceSelector(original["resourceSelector"], d, config) + transformed["severity"] = + flattenSecurityCenterFolderCustomModuleCustomConfigSeverity(original["severity"], d, config) + transformed["description"] = + flattenSecurityCenterFolderCustomModuleCustomConfigDescription(original["description"], d, config) + transformed["recommendation"] = + flattenSecurityCenterFolderCustomModuleCustomConfigRecommendation(original["recommendation"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterFolderCustomModuleCustomConfigPredicate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenSecurityCenterFolderCustomModuleCustomConfigPredicateExpression(original["expression"], d, config) + transformed["title"] = + flattenSecurityCenterFolderCustomModuleCustomConfigPredicateTitle(original["title"], d, config) + transformed["description"] = + flattenSecurityCenterFolderCustomModuleCustomConfigPredicateDescription(original["description"], d, config) + transformed["location"] = + flattenSecurityCenterFolderCustomModuleCustomConfigPredicateLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterFolderCustomModuleCustomConfigPredicateExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigPredicateTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigPredicateDescription(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigPredicateLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["properties"] = + flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputProperties(original["properties"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesName(original["name"], d, config), + "value_expression": flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpression(original["valueExpression"], d, config), + }) + } + return transformed +} +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(original["expression"], d, config) + transformed["title"] = + flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(original["title"], d, config) + transformed["description"] = + flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(original["description"], d, config) + transformed["location"] = + flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigResourceSelector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_types"] = + 
flattenSecurityCenterFolderCustomModuleCustomConfigResourceSelectorResourceTypes(original["resourceTypes"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterFolderCustomModuleCustomConfigResourceSelectorResourceTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterFolderCustomModuleCustomConfigRecommendation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterFolderCustomModuleDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleEnablementState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPredicate, err := expandSecurityCenterFolderCustomModuleCustomConfigPredicate(original["predicate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPredicate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["predicate"] = transformedPredicate + } + + transformedCustomOutput, err := 
expandSecurityCenterFolderCustomModuleCustomConfigCustomOutput(original["custom_output"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomOutput); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customOutput"] = transformedCustomOutput + } + + transformedResourceSelector, err := expandSecurityCenterFolderCustomModuleCustomConfigResourceSelector(original["resource_selector"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceSelector); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceSelector"] = transformedResourceSelector + } + + transformedSeverity, err := expandSecurityCenterFolderCustomModuleCustomConfigSeverity(original["severity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeverity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["severity"] = transformedSeverity + } + + transformedDescription, err := expandSecurityCenterFolderCustomModuleCustomConfigDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedRecommendation, err := expandSecurityCenterFolderCustomModuleCustomConfigRecommendation(original["recommendation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecommendation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recommendation"] = transformedRecommendation + } + + return transformed, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigPredicate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandSecurityCenterFolderCustomModuleCustomConfigPredicateExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandSecurityCenterFolderCustomModuleCustomConfigPredicateTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := expandSecurityCenterFolderCustomModuleCustomConfigPredicateDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandSecurityCenterFolderCustomModuleCustomConfigPredicateLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigPredicateExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigPredicateTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigPredicateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigPredicateLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProperties, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputProperties(original["properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["properties"] = transformedProperties + } + + return transformed, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValueExpression, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpression(original["value_expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValueExpression); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["valueExpression"] = transformedValueExpression + } + + req = append(req, transformed) + } + return req, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(original["location"], d, config) + if 
err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigResourceSelector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceTypes, err := expandSecurityCenterFolderCustomModuleCustomConfigResourceSelectorResourceTypes(original["resource_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceTypes"] = transformedResourceTypes + } + + return transformed, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigResourceSelectorResourceTypes(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigSeverity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterFolderCustomModuleCustomConfigRecommendation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google/services/securitycenter/resource_scc_folder_custom_module_sweeper.go b/google/services/securitycenter/resource_scc_folder_custom_module_sweeper.go new file mode 100644 index 00000000000..0058b2792f2 --- /dev/null +++ b/google/services/securitycenter/resource_scc_folder_custom_module_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterFolderCustomModule", testSweepSecurityCenterFolderCustomModule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterFolderCustomModule(region string) error { + resourceName := "SecurityCenterFolderCustomModule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/folders/{{folder}}/securityHealthAnalyticsSettings/customModules", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, 
+ UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["folderCustomModules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/securitycenter/resource_scc_folder_custom_module_test.go 
b/google/services/securitycenter/resource_scc_folder_custom_module_test.go new file mode 100644 index 00000000000..8e593201021 --- /dev/null +++ b/google/services/securitycenter/resource_scc_folder_custom_module_test.go @@ -0,0 +1,228 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package securitycenter_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Custom Module tests cannot be run in parallel without running into 409 Conflict reponses. +// Run them as individual steps of an update test instead. +func TestAccSecurityCenterFolderCustomModule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "sleep": true, + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + CheckDestroy: testAccCheckSecurityCenterFolderCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterFolderCustomModule_sccFolderCustomModuleBasicExample(context), + }, + { + ResourceName: "google_scc_folder_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"folder"}, + }, + { + Config: testAccSecurityCenterFolderCustomModule_sccFolderCustomModuleFullExample(context), + }, + { + ResourceName: "google_scc_folder_custom_module.example", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"folder"}, + }, + { + Config: testAccSecurityCenterFolderCustomModule_sccFolderCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_folder_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"folder"}, + }, + }, + }) +} + +func testAccSecurityCenterFolderCustomModule_sccFolderCustomModuleBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder-name%{random_suffix}" +} + +resource "time_sleep" "wait_1_minute" { + depends_on = [google_folder.folder] + + create_duration = "1m" +} + +resource "google_scc_folder_custom_module" "example" { + folder = google_folder.folder.folder_id + display_name = "tf_test_basic_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } + + + depends_on = [time_sleep.wait_1_minute] +} +`, context) +} + +func testAccSecurityCenterFolderCustomModule_sccFolderCustomModuleFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder-name%{random_suffix}" +} + +resource "google_scc_folder_custom_module" "example" { + folder = google_folder.folder.folder_id + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +`, context) +} + +func testAccSecurityCenterFolderCustomModule_sccFolderCustomModuleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder-name%{random_suffix}" +} + +resource "google_scc_folder_custom_module" "example" { + folder = google_folder.folder.folder_id + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "DISABLED" + custom_config { + predicate { + expression = "resource.name == \"updated-name\"" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + custom_output { 
+ properties { + name = "violation" + value_expression { + expression = "resource.name" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + } + } + resource_selector { + resource_types = [ + "compute.googleapis.com/Instance", + ] + } + severity = "CRITICAL" + description = "Updated description of the custom module" + recommendation = "Updated steps to resolve violation" + } +} +`, context) +} + +func testAccCheckSecurityCenterFolderCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_scc_folder_custom_module" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{SecurityCenterBasePath}}folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("SecurityCenterFolderCustomModule still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/services/securitycenter/resource_scc_organization_custom_module.go b/google/services/securitycenter/resource_scc_organization_custom_module.go new file mode 100644 index 00000000000..540b79a42e3 --- /dev/null +++ b/google/services/securitycenter/resource_scc_organization_custom_module.go @@ -0,0 +1,962 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceSecurityCenterOrganizationCustomModule() *schema.Resource { + return &schema.Resource{ + Create: resourceSecurityCenterOrganizationCustomModuleCreate, + Read: resourceSecurityCenterOrganizationCustomModuleRead, + Update: resourceSecurityCenterOrganizationCustomModuleUpdate, + Delete: resourceSecurityCenterOrganizationCustomModuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceSecurityCenterOrganizationCustomModuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "custom_config": { + Type: schema.TypeList, + Required: true, + Description: `The user specified custom configuration for the module.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predicate": { + Type: schema.TypeList, + Required: true, + Description: `The CEL expression to evaluate to produce findings. 
When the expression evaluates +to true against a resource, a finding is generated.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the +expression, e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a +file name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. This can +be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + "recommendation": { + Type: schema.TypeString, + Required: true, + Description: `An explanation of the recommended steps that security teams can take to resolve +the detected issue. This explanation is returned with each finding generated by +this module in the nextSteps property of the finding JSON.`, + }, + "resource_selector": { + Type: schema.TypeList, + Required: true, + Description: `The resource types that the custom module operates on. Each custom module +can specify up to 5 resource types.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_types": { + Type: schema.TypeList, + Required: true, + Description: `The resource types to run the detector on.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "severity": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"CRITICAL", "HIGH", "MEDIUM", "LOW"}), + Description: `The severity to assign to findings generated by the module. 
Possible values: ["CRITICAL", "HIGH", "MEDIUM", "LOW"]`, + }, + "custom_output": { + Type: schema.TypeList, + Optional: true, + Description: `Custom output properties.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "properties": { + Type: schema.TypeList, + Optional: true, + Description: `A list of custom output properties to add to the finding.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the property for the custom output.`, + }, + "value_expression": { + Type: schema.TypeList, + Optional: true, + Description: `The CEL expression for the custom output. A resource property can be specified +to return the value of the property or a text string enclosed in quotation marks.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the expression. This is a longer text which describes the +expression, e.g. when hovered over it in a UI.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Description: `String indicating the location of the expression for error reporting, e.g. a +file name and a position in the file.`, + }, + "title": { + Type: schema.TypeString, + Optional: true, + Description: `Title for the expression, i.e. a short string describing its purpose. This can +be used e.g. in UIs which allow to enter the expression.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Text that describes the vulnerability or misconfiguration that the custom +module detects. 
This explanation is returned with each finding instance to +help investigators understand the detected issue. The text must be enclosed in quotation marks.`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(`^[a-z][\w_]{0,127}$`), + Description: `The display name of the Security Health Analytics custom module. This +display name becomes the finding category for all findings that are +returned by this custom module. The display name must be between 1 and +128 characters, start with a lowercase letter, and contain alphanumeric +characters or underscores only.`, + }, + "enablement_state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ENABLED", "DISABLED"}), + Description: `The enablement state of the custom module. Possible values: ["ENABLED", "DISABLED"]`, + }, + "organization": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Numerical ID of the parent organization.`, + }, + "ancestor_module": { + Type: schema.TypeString, + Computed: true, + Description: `If empty, indicates that the custom module was created in the organization, folder, +or project in which you are viewing the custom module. Otherwise, ancestor_module +specifies the organization or folder from which the custom module is inherited.`, + }, + "last_editor": { + Type: schema.TypeString, + Computed: true, + Description: `The editor that last updated the custom module.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the custom module. Its format is "organizations/{org_id}/securityHealthAnalyticsSettings/customModules/{customModule}". +The id {customModule} is server-generated and is not user settable. 
It will be a numeric id containing 1-20 digits.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time at which the custom module was last updated. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and +up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceSecurityCenterOrganizationCustomModuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandSecurityCenterOrganizationCustomModuleDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + enablementStateProp, err := expandSecurityCenterOrganizationCustomModuleEnablementState(d.Get("enablement_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enablement_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(enablementStateProp)) && (ok || !reflect.DeepEqual(v, enablementStateProp)) { + obj["enablementState"] = enablementStateProp + } + customConfigProp, err := expandSecurityCenterOrganizationCustomModuleCustomConfig(d.Get("custom_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(customConfigProp)) && (ok || !reflect.DeepEqual(v, customConfigProp)) { + obj["customConfig"] = customConfigProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + 
transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new OrganizationCustomModule: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating OrganizationCustomModule: %s", err) + } + if err := d.Set("name", flattenSecurityCenterOrganizationCustomModuleName(res["name"], d, config)); err != nil { + return fmt.Errorf(`Error setting computed identity field "name": %s`, err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating OrganizationCustomModule %q: %#v", d.Id(), res) + + return resourceSecurityCenterOrganizationCustomModuleRead(d, meta) +} + +func resourceSecurityCenterOrganizationCustomModuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil 
indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityCenterOrganizationCustomModule %q", d.Id())) + } + + if err := d.Set("name", flattenSecurityCenterOrganizationCustomModuleName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + if err := d.Set("display_name", flattenSecurityCenterOrganizationCustomModuleDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + if err := d.Set("enablement_state", flattenSecurityCenterOrganizationCustomModuleEnablementState(res["enablementState"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + if err := d.Set("update_time", flattenSecurityCenterOrganizationCustomModuleUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + if err := d.Set("last_editor", flattenSecurityCenterOrganizationCustomModuleLastEditor(res["lastEditor"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + if err := d.Set("ancestor_module", flattenSecurityCenterOrganizationCustomModuleAncestorModule(res["ancestorModule"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + if err := d.Set("custom_config", flattenSecurityCenterOrganizationCustomModuleCustomConfig(res["customConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading OrganizationCustomModule: %s", err) + } + + return nil +} + +func 
resourceSecurityCenterOrganizationCustomModuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + enablementStateProp, err := expandSecurityCenterOrganizationCustomModuleEnablementState(d.Get("enablement_state"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enablement_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enablementStateProp)) { + obj["enablementState"] = enablementStateProp + } + customConfigProp, err := expandSecurityCenterOrganizationCustomModuleCustomConfig(d.Get("custom_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("custom_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, customConfigProp)) { + obj["customConfig"] = customConfigProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating OrganizationCustomModule %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("enablement_state") { + updateMask = append(updateMask, "enablementState") + } + + if d.HasChange("custom_config") { + updateMask = append(updateMask, "customConfig") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": 
strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating OrganizationCustomModule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating OrganizationCustomModule %q: %#v", d.Id(), res) + } + + return resourceSecurityCenterOrganizationCustomModuleRead(d, meta) +} + +func resourceSecurityCenterOrganizationCustomModuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/securityHealthAnalyticsSettings/customModules") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting OrganizationCustomModule %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: 
 d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "OrganizationCustomModule") + } + + log.Printf("[DEBUG] Finished deleting OrganizationCustomModule %q: %#v", d.Id(), res) + return nil +} + +func resourceSecurityCenterOrganizationCustomModuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^organizations/(?P<organization>[^/]+)/securityHealthAnalyticsSettings/customModules/(?P<name>[^/]+)$", + "^(?P<organization>[^/]+)/(?P<name>[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSecurityCenterOrganizationCustomModuleName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.NameFromSelfLinkStateFunc(v) +} + +func flattenSecurityCenterOrganizationCustomModuleDisplayName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleEnablementState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleLastEditor(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleAncestorModule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenSecurityCenterOrganizationCustomModuleCustomConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["predicate"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicate(original["predicate"], d, config) + transformed["custom_output"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutput(original["customOutput"], d, config) + transformed["resource_selector"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigResourceSelector(original["resourceSelector"], d, config) + transformed["severity"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigSeverity(original["severity"], d, config) + transformed["description"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigDescription(original["description"], d, config) + transformed["recommendation"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigRecommendation(original["recommendation"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicate(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateExpression(original["expression"], d, config) + transformed["title"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateTitle(original["title"], d, config) + transformed["description"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateDescription(original["description"], d, config) + transformed["location"] = + 
flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigPredicateLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutput(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["properties"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputProperties(original["properties"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputProperties(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesName(original["name"], d, 
config), + "value_expression": flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpression(original["valueExpression"], d, config), + }) + } + return transformed +} +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["expression"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(original["expression"], d, config) + transformed["title"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(original["title"], d, config) + transformed["description"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(original["description"], d, config) + transformed["location"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(original["location"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigResourceSelector(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resource_types"] = + flattenSecurityCenterOrganizationCustomModuleCustomConfigResourceSelectorResourceTypes(original["resourceTypes"], d, config) + return []interface{}{transformed} +} +func flattenSecurityCenterOrganizationCustomModuleCustomConfigResourceSelectorResourceTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigSeverity(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenSecurityCenterOrganizationCustomModuleCustomConfigRecommendation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandSecurityCenterOrganizationCustomModuleDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleEnablementState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfig(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPredicate, err := expandSecurityCenterOrganizationCustomModuleCustomConfigPredicate(original["predicate"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPredicate); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["predicate"] = transformedPredicate + } + + transformedCustomOutput, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutput(original["custom_output"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCustomOutput); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["customOutput"] = transformedCustomOutput + } + + transformedResourceSelector, err := expandSecurityCenterOrganizationCustomModuleCustomConfigResourceSelector(original["resource_selector"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceSelector); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceSelector"] = transformedResourceSelector + } + + transformedSeverity, err := expandSecurityCenterOrganizationCustomModuleCustomConfigSeverity(original["severity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSeverity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["severity"] = transformedSeverity + } + + transformedDescription, err := expandSecurityCenterOrganizationCustomModuleCustomConfigDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + 
transformedRecommendation, err := expandSecurityCenterOrganizationCustomModuleCustomConfigRecommendation(original["recommendation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecommendation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recommendation"] = transformedRecommendation + } + + return transformed, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigPredicate(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); 
val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigPredicateLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutput(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedProperties, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputProperties(original["properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["properties"] = transformedProperties + } + + return transformed, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == 
nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValueExpression, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpression(original["value_expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValueExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["valueExpression"] = transformedValueExpression + } + + req = append(req, transformed) + } + return req, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedExpression, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(original["expression"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpression); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expression"] = transformedExpression + } + + transformedTitle, err := 
expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(original["title"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTitle); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["title"] = transformedTitle + } + + transformedDescription, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(original["description"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["description"] = transformedDescription + } + + transformedLocation, err := expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["location"] = transformedLocation + } + + return transformed, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionExpression(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionTitle(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigCustomOutputPropertiesValueExpressionLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigResourceSelector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedResourceTypes, err := expandSecurityCenterOrganizationCustomModuleCustomConfigResourceSelectorResourceTypes(original["resource_types"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResourceTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resourceTypes"] = transformedResourceTypes + } + + return transformed, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigResourceSelectorResourceTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigSeverity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSecurityCenterOrganizationCustomModuleCustomConfigRecommendation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google/services/securitycenter/resource_scc_organization_custom_module_sweeper.go b/google/services/securitycenter/resource_scc_organization_custom_module_sweeper.go new file mode 100644 index 00000000000..a14383fb849 --- /dev/null +++ b/google/services/securitycenter/resource_scc_organization_custom_module_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, 
Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package securitycenter + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SecurityCenterOrganizationCustomModule", testSweepSecurityCenterOrganizationCustomModule) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSecurityCenterOrganizationCustomModule(region string) error { + resourceName := "SecurityCenterOrganizationCustomModule" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": 
billingId, + }, + } + + listTemplate := strings.Split("https://securitycenter.googleapis.com/v1/organizations/{{organization}}/securityHealthAnalyticsSettings/customModules", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["organizationCustomModules"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be swept + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://securitycenter.googleapis.com/v1/organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/securitycenter/resource_scc_organization_custom_module_test.go b/google/services/securitycenter/resource_scc_organization_custom_module_test.go new file mode 100644 index 00000000000..a9469818969 --- /dev/null +++ b/google/services/securitycenter/resource_scc_organization_custom_module_test.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package securitycenter_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Custom Module tests cannot be run in parallel without running into 409 Conflict responses. +// Run them as individual steps of an update test instead. +func TestAccSecurityCenterOrganizationCustomModule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecurityCenterOrganizationCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterOrganizationCustomModule_sccOrganizationCustomModuleBasicExample(context), + }, + { + ResourceName: "google_scc_organization_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization"}, + }, + { + Config: testAccSecurityCenterOrganizationCustomModule_sccOrganizationCustomModuleFullExample(context), + }, + { + ResourceName: "google_scc_organization_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization"}, + }, + { + Config: testAccSecurityCenterOrganizationCustomModule_sccOrganizationCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_organization_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"organization"}, + }, + }, + }) +} + +func testAccSecurityCenterOrganizationCustomModule_sccOrganizationCustomModuleBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_organization_custom_module" "example" { + organization = "%{org_id}" + display_name = "tf_test_basic_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." + severity = "MEDIUM" + } +} +`, context) +} + +func testAccSecurityCenterOrganizationCustomModule_sccOrganizationCustomModuleFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_organization_custom_module" "example" { + organization = "%{org_id}" + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +`, context) +} + +func testAccSecurityCenterOrganizationCustomModule_sccOrganizationCustomModuleUpdate(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_scc_organization_custom_module" "example" { + organization = "%{org_id}" + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "DISABLED" + custom_config { + predicate { + expression = "resource.name == \"updated-name\"" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + custom_output { + properties { + name = "violation" + value_expression { + expression = "resource.name" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + } + } + resource_selector { + resource_types = [ + "compute.googleapis.com/Instance", + ] + } + severity = "CRITICAL" + description = "Updated description of the custom module" + recommendation = "Updated steps to resolve violation" + } +} +`, context) +} + +func testAccCheckSecurityCenterOrganizationCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_scc_organization_custom_module" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{SecurityCenterBasePath}}organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("SecurityCenterOrganizationCustomModule still exists at %s", url) + } + } + + return nil + } +} diff --git 
a/google/services/securitycenter/resource_scc_project_custom_module_generated_test.go b/google/services/securitycenter/resource_scc_project_custom_module_generated_test.go deleted file mode 100644 index 6872bd6759e..00000000000 --- a/google/services/securitycenter/resource_scc_project_custom_module_generated_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - -package securitycenter_test - -import ( - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - - "github.com/hashicorp/terraform-provider-google/google/acctest" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" -) - -func TestAccSecurityCenterProjectCustomModule_sccProjectCustomModuleBasicExample(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckSecurityCenterProjectCustomModuleDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleBasicExample(context), - }, - { - ResourceName: 
"google_scc_project_custom_module.example", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleBasicExample(context map[string]interface{}) string { - return acctest.Nprintf(` -resource "google_scc_project_custom_module" "example" { - display_name = "basic_custom_module" - enablement_state = "ENABLED" - custom_config { - predicate { - expression = "resource.rotationPeriod > duration(\"2592000s\")" - } - resource_selector { - resource_types = [ - "cloudkms.googleapis.com/CryptoKey", - ] - } - description = "The rotation period of the identified cryptokey resource exceeds 30 days." - recommendation = "Set the rotation period to at most 30 days." - severity = "MEDIUM" - } -} -`, context) -} - -func TestAccSecurityCenterProjectCustomModule_sccProjectCustomModuleFullExample(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckSecurityCenterProjectCustomModuleDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleFullExample(context), - }, - { - ResourceName: "google_scc_project_custom_module.example", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleFullExample(context map[string]interface{}) string { - return acctest.Nprintf(` -resource "google_scc_project_custom_module" "example" { - display_name = "full_custom_module" - enablement_state = "ENABLED" - custom_config { - predicate { - expression = "resource.rotationPeriod > duration(\"2592000s\")" - title = "Purpose of the expression" - description = "description of the expression" - location = "location of the expression" - } - 
custom_output { - properties { - name = "duration" - value_expression { - expression = "resource.rotationPeriod" - title = "Purpose of the expression" - description = "description of the expression" - location = "location of the expression" - } - } - } - resource_selector { - resource_types = [ - "cloudkms.googleapis.com/CryptoKey", - ] - } - severity = "LOW" - description = "Description of the custom module" - recommendation = "Steps to resolve violation" - } -} -`, context) -} - -func testAccCheckSecurityCenterProjectCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { - return func(s *terraform.State) error { - for name, rs := range s.RootModule().Resources { - if rs.Type != "google_scc_project_custom_module" { - continue - } - if strings.HasPrefix(name, "data.") { - continue - } - - config := acctest.GoogleProviderConfig(t) - - url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{SecurityCenterBasePath}}projects/{{project}}/securityHealthAnalyticsSettings/customModules/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if config.BillingProject != "" { - billingProject = config.BillingProject - } - - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: billingProject, - RawURL: url, - UserAgent: config.UserAgent, - }) - if err == nil { - return fmt.Errorf("SecurityCenterProjectCustomModule still exists at %s", url) - } - } - - return nil - } -} diff --git a/google/services/securitycenter/resource_scc_project_custom_module_test.go b/google/services/securitycenter/resource_scc_project_custom_module_test.go index 19b38db68e9..22966194ff2 100644 --- a/google/services/securitycenter/resource_scc_project_custom_module_test.go +++ b/google/services/securitycenter/resource_scc_project_custom_module_test.go @@ -3,14 +3,21 @@ package securitycenter_test import ( + "fmt" + "strings" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) -func TestAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(t *testing.T) { +// Custom Module tests cannot be run in parallel without running into 409 Conflict reponses. +// Run them as individual steps of an update test instead. +func TestAccSecurityCenterProjectCustomModule(t *testing.T) { t.Parallel() context := map[string]interface{}{ @@ -22,6 +29,14 @@ func TestAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(t *te ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckSecurityCenterProjectCustomModuleDestroyProducer(t), Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleBasicExample(context), + }, + { + ResourceName: "google_scc_project_custom_module.example", + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleFullExample(context), }, @@ -42,6 +57,64 @@ func TestAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(t *te }) } +func testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_project_custom_module" "example" { + display_name = "tf_test_basic_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} +`, context) +} + +func testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_project_custom_module" "example" { + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +`, context) +} + func testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_scc_project_custom_module" "example" { @@ -77,3 +150,42 @@ resource "google_scc_project_custom_module" "example" { } `, context) } + +func testAccCheckSecurityCenterProjectCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_scc_project_custom_module" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{SecurityCenterBasePath}}projects/{{project}}/securityHealthAnalyticsSettings/customModules/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + 
billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("SecurityCenterProjectCustomModule still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/services/sql/resource_sql_database_instance.go b/google/services/sql/resource_sql_database_instance.go index 1f486c12903..c27c233dbc3 100644 --- a/google/services/sql/resource_sql_database_instance.go +++ b/google/services/sql/resource_sql_database_instance.go @@ -949,6 +949,11 @@ is set to true. Defaults to ZONAL.`, DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), Description: `The timestamp of the point in time that should be restored.`, }, + "preferred_zone": { + Type: schema.TypeString, + Optional: true, + Description: `(Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. 
If no zone is specified, clone to the same zone as the source instance.`, + }, "database_names": { Type: schema.TypeList, Optional: true, @@ -1320,6 +1325,7 @@ func expandCloneContext(configured []interface{}) (*sqladmin.CloneContext, strin return &sqladmin.CloneContext{ PointInTime: _cloneConfiguration["point_in_time"].(string), + PreferredZone: _cloneConfiguration["preferred_zone"].(string), DatabaseNames: databaseNames, AllocatedIpRange: _cloneConfiguration["allocated_ip_range"].(string), }, _cloneConfiguration["source_instance_name"].(string) diff --git a/google/services/sql/resource_sql_database_instance_test.go b/google/services/sql/resource_sql_database_instance_test.go index 5ff2e3b982e..210da6408ed 100644 --- a/google/services/sql/resource_sql_database_instance_test.go +++ b/google/services/sql/resource_sql_database_instance_test.go @@ -3686,6 +3686,7 @@ resource "google_sql_database_instance" "instance" { clone { source_instance_name = data.google_sql_backup_run.backup.instance point_in_time = data.google_sql_backup_run.backup.start_time + preferred_zone = "us-central1-b" } deletion_protection = false diff --git a/google/services/storagetransfer/resource_storage_transfer_job.go b/google/services/storagetransfer/resource_storage_transfer_job.go index 3b639850139..26920da65ff 100644 --- a/google/services/storagetransfer/resource_storage_transfer_job.go +++ b/google/services/storagetransfer/resource_storage_transfer_job.go @@ -87,6 +87,33 @@ func ResourceStorageTransferJob() *schema.Resource { ForceNew: true, Description: `The project in which the resource belongs. 
If it is not provided, the provider project is used.`, }, + "event_stream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"schedule"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Specifies a unique name of the resource such as AWS SQS ARN in the form 'arn:aws:sqs:region:account_id:queue_name', or Pub/Sub subscription resource name in the form 'projects/{project}/subscriptions/{sub}'", + }, + "event_stream_start_time": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies the date and time that Storage Transfer Service starts listening for events from this stream. If no start time is specified or start time is in the past, Storage Transfer Service starts listening immediately", + ValidateFunc: validation.IsRFC3339Time, + }, + "event_stream_expiration_time": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies the date and time at which Storage Transfer Service stops listening for events from this stream. 
After this time, any transfers in progress will complete, but no new transfers are initiated", + ValidateFunc: validation.IsRFC3339Time, + }, + }, + }, + }, "transfer_spec": { Type: schema.TypeList, Required: true, @@ -200,9 +227,10 @@ func ResourceStorageTransferJob() *schema.Resource { Description: `Notification configuration.`, }, "schedule": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"event_stream"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "schedule_start_date": { @@ -572,6 +600,7 @@ func resourceStorageTransferJobCreate(d *schema.ResourceData, meta interface{}) ProjectId: project, Status: d.Get("status").(string), Schedule: expandTransferSchedules(d.Get("schedule").([]interface{})), + EventStream: expandEventStream(d.Get("event_stream").([]interface{})), TransferSpec: expandTransferSpecs(d.Get("transfer_spec").([]interface{})), NotificationConfig: expandTransferJobNotificationConfig(d.Get("notification_config").([]interface{})), } @@ -647,6 +676,11 @@ func resourceStorageTransferJobRead(d *schema.ResourceData, meta interface{}) er return err } + err = d.Set("event_stream", flattenTransferEventStream(res.EventStream)) + if err != nil { + return err + } + err = d.Set("transfer_spec", flattenTransferSpec(res.TransferSpec, d)) if err != nil { return err @@ -675,6 +709,13 @@ func resourceStorageTransferJobUpdate(d *schema.ResourceData, meta interface{}) transferJob := &storagetransfer.TransferJob{} fieldMask := []string{} + if d.HasChange("event_stream") { + fieldMask = append(fieldMask, "event_stream") + if v, ok := d.GetOk("event_stream"); ok { + transferJob.EventStream = expandEventStream(v.([]interface{})) + } + } + if d.HasChange("description") { fieldMask = append(fieldMask, "description") if v, ok := d.GetOk("description"); ok { @@ -904,6 +945,39 @@ func flattenTransferSchedule(transferSchedule *storagetransfer.Schedule) []map[s return 
[]map[string]interface{}{data} } +func expandEventStream(e []interface{}) *storagetransfer.EventStream { + if len(e) == 0 || e[0] == nil { + return nil + } + + eventStream := e[0].(map[string]interface{}) + return &storagetransfer.EventStream{ + Name: eventStream["name"].(string), + EventStreamStartTime: eventStream["event_stream_start_time"].(string), + EventStreamExpirationTime: eventStream["event_stream_expiration_time"].(string), + } +} + +func flattenTransferEventStream(eventStream *storagetransfer.EventStream) []map[string]interface{} { + if eventStream == nil || reflect.DeepEqual(eventStream, &storagetransfer.EventStream{}) { + return nil + } + + data := map[string]interface{}{ + "name": eventStream.Name, + } + + if eventStream.EventStreamStartTime != "" { + data["event_stream_start_time"] = eventStream.EventStreamStartTime + } + + if eventStream.EventStreamExpirationTime != "" { + data["event_stream_expiration_time"] = eventStream.EventStreamExpirationTime + } + + return []map[string]interface{}{data} +} + func expandGcsData(gcsDatas []interface{}) *storagetransfer.GcsData { if len(gcsDatas) == 0 || gcsDatas[0] == nil { return nil diff --git a/google/services/storagetransfer/resource_storage_transfer_job_test.go b/google/services/storagetransfer/resource_storage_transfer_job_test.go index 3260cee7c57..7ec2a5082d0 100644 --- a/google/services/storagetransfer/resource_storage_transfer_job_test.go +++ b/google/services/storagetransfer/resource_storage_transfer_job_test.go @@ -201,6 +201,59 @@ func TestAccStorageTransferJob_transferOptions(t *testing.T) { }) } +func TestAccStorageTransferJob_eventStream(t *testing.T) { + t.Parallel() + + testDataSourceBucketName := acctest.RandString(t, 10) + testDataSinkName := acctest.RandString(t, 10) + testTransferJobDescription := acctest.RandString(t, 10) + testPubSubTopicName := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + testEventStreamPubSubTopicName := fmt.Sprintf("tf-test-topic-%s", 
acctest.RandString(t, 10)) + testPubSubSubscriptionName := fmt.Sprintf("tf-test-subscription-%s", acctest.RandString(t, 10)) + eventStreamStart := []string{"2014-10-02T15:01:23Z", "2019-10-02T15:01:23Z"} + eventStreamEnd := []string{"2022-10-02T15:01:23Z", "2032-10-02T15:01:23Z"} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageTransferJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageTransferJob_basic(envvar.GetTestProjectFromEnv(), testDataSourceBucketName, testDataSinkName, testTransferJobDescription, testPubSubTopicName), + }, + { + ResourceName: "google_storage_transfer_job.transfer_job", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccStorageTransferJob_eventStream(envvar.GetTestProjectFromEnv(), testDataSourceBucketName, testDataSinkName, testEventStreamPubSubTopicName, testPubSubSubscriptionName, testTransferJobDescription, eventStreamStart[0], eventStreamEnd[0]), + }, + { + ResourceName: "google_storage_transfer_job.transfer_job", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccStorageTransferJob_eventStream(envvar.GetTestProjectFromEnv(), testDataSourceBucketName, testDataSinkName, testEventStreamPubSubTopicName, testPubSubSubscriptionName, testTransferJobDescription, eventStreamStart[1], eventStreamEnd[0]), + }, + { + ResourceName: "google_storage_transfer_job.transfer_job", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccStorageTransferJob_eventStream(envvar.GetTestProjectFromEnv(), testDataSourceBucketName, testDataSinkName, testEventStreamPubSubTopicName, testPubSubSubscriptionName, testTransferJobDescription, eventStreamStart[1], eventStreamEnd[1]), + }, + { + ResourceName: "google_storage_transfer_job.transfer_job", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func 
TestAccStorageTransferJob_objectConditions(t *testing.T) { t.Parallel() @@ -383,6 +436,99 @@ resource "google_storage_transfer_job" "transfer_job" { `, project, dataSourceBucketName, project, dataSinkBucketName, project, transferJobDescription, project) } +func testAccStorageTransferJob_eventStream(project string, dataSourceBucketName string, dataSinkBucketName string, pubsubTopicName string, pubsubSubscriptionName string, transferJobDescription string, eventStreamStart string, eventStreamEnd string) string { + return fmt.Sprintf(` +data "google_storage_transfer_project_service_account" "default" { + project = "%s" +} + +resource "google_storage_bucket" "data_source" { + name = "%s" + project = "%s" + location = "US" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_iam_member" "data_source" { + bucket = google_storage_bucket.data_source.name + role = "roles/storage.admin" + member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}" +} + +resource "google_storage_bucket" "data_sink" { + name = "%s" + project = "%s" + location = "US" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_iam_member" "data_sink" { + bucket = google_storage_bucket.data_sink.name + role = "roles/storage.admin" + member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}" +} + +resource "google_pubsub_subscription_iam_member" "editor" { + subscription = google_pubsub_subscription.example.name + role = "roles/editor" + member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}" +} + +resource "google_pubsub_topic" "example" { + name = "%s" +} + +resource "google_pubsub_subscription" "example" { + name = "%s" + topic = google_pubsub_topic.example.name + + ack_deadline_seconds = 20 + + labels = { + foo = "bar" + } + + push_config { + push_endpoint = "https://example.com/push" + + attributes = 
{ + x-goog-version = "v1" + } + } +} + +resource "google_storage_transfer_job" "transfer_job" { + description = "%s" + project = "%s" + + event_stream { + name = google_pubsub_subscription.example.id + event_stream_start_time = "%s" + event_stream_expiration_time = "%s" + } + + transfer_spec { + gcs_data_source { + bucket_name = google_storage_bucket.data_source.name + path = "foo/bar/" + } + gcs_data_sink { + bucket_name = google_storage_bucket.data_sink.name + path = "foo/bar/" + } + } + + depends_on = [ + google_storage_bucket_iam_member.data_source, + google_storage_bucket_iam_member.data_sink, + google_pubsub_subscription_iam_member.editor, + ] +} +`, project, dataSourceBucketName, project, dataSinkBucketName, project, pubsubTopicName, pubsubSubscriptionName, transferJobDescription, project, eventStreamStart, eventStreamEnd) +} + func testAccStorageTransferJob_omitNotificationConfig(project string, dataSourceBucketName string, dataSinkBucketName string, transferJobDescription string) string { return fmt.Sprintf(` data "google_storage_transfer_project_service_account" "default" { diff --git a/google/services/tags/resource_tags_tag_binding.go b/google/services/tags/resource_tags_tag_binding.go index 5af248857a5..696b3bcc21e 100644 --- a/google/services/tags/resource_tags_tag_binding.go +++ b/google/services/tags/resource_tags_tag_binding.go @@ -288,6 +288,14 @@ func resourceTagsTagBindingImport(d *schema.ResourceData, meta interface{}) ([]* return nil, err } + stringParts := strings.Split(d.Get("name").(string), "/") + if len(stringParts) < 3 { + return nil, fmt.Errorf("Error parsing parent name. 
Should be in form {{parent}}/tagValues/{{tag_value}}") + } + if err := d.Set("parent", stringParts[0]); err != nil { + return nil, fmt.Errorf("Error setting parent, %s", err) + } + name := d.Get("name").(string) d.SetId(name) diff --git a/google/services/vertexai/resource_vertex_ai_index.go b/google/services/vertexai/resource_vertex_ai_index.go index b66ec5686f7..fe4c445682a 100644 --- a/google/services/vertexai/resource_vertex_ai_index.go +++ b/google/services/vertexai/resource_vertex_ai_index.go @@ -89,6 +89,16 @@ Please refer to the field 'effective_labels' for all of the labels present on th MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "contents_delta_uri": { + Type: schema.TypeString, + Required: true, + Description: `Allows inserting, updating or deleting the contents of the Matching Engine Index. +The string must be a valid Cloud Storage directory path. If this +field is set when calling IndexService.UpdateIndex, then no other +Index field can be also updated as part of the same call. +The expected structure and format of the files this URI points to is +described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format`, + }, "config": { Type: schema.TypeList, Optional: true, @@ -188,16 +198,6 @@ The shard size must be specified when creating an index. The value must be one o }, }, }, - "contents_delta_uri": { - Type: schema.TypeString, - Optional: true, - Description: `Allows inserting, updating or deleting the contents of the Matching Engine Index. -The string must be a valid Cloud Storage directory path. If this -field is set when calling IndexService.UpdateIndex, then no other -Index field can be also updated as part of the same call. 
-The expected structure and format of the files this URI points to is -described at https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-engine#input-data-format`, - }, "is_complete_overwrite": { Type: schema.TypeBool, Optional: true, diff --git a/google/tpgresource/utils.go b/google/tpgresource/utils.go index 4b3bb142eb9..7957091f4e3 100644 --- a/google/tpgresource/utils.go +++ b/google/tpgresource/utils.go @@ -623,7 +623,7 @@ func ReplaceVarsForId(d TerraformResourceData, config *transport_tpg.Config, lin // substitution as 10+ calls to allow for future use cases. func ReplaceVarsRecursive(d TerraformResourceData, config *transport_tpg.Config, linkTmpl string, shorten bool, depth int) (string, error) { if depth > 10 { - return "", errors.New("Recursive substitution detcted") + return "", errors.New("Recursive substitution detected") } // https://github.com/google/re2/wiki/Syntax diff --git a/website/docs/guides/version_5_upgrade.html.markdown b/website/docs/guides/version_5_upgrade.html.markdown index 3845211cd37..d1ebdedd2b1 100644 --- a/website/docs/guides/version_5_upgrade.html.markdown +++ b/website/docs/guides/version_5_upgrade.html.markdown @@ -118,10 +118,50 @@ The new annotations model is similar to the new labels model and will be applied There are now two annotation-related fields with the new model, the `annotations` and the output-only `effective_annotations` fields. +### Updates to how empty strings are handled in the `provider` block + +In 5.0.0+ any empty strings set in the `provider` block will be used and not ignored. Previously any empty strings used as arguments in the `provider` block were ignored and did not contribute to configuration of the provider. + +Users should remove empty string arguments to avoid errors during plan/apply stages. 
+ +```tf +provider "google" { + credentials = "" # this line should be removed + project = "my-project" + region = "us-central1" + zone = "" # this line should be removed +} +``` + +### Changes to how default `location`, `region` and `zone` values are obtained for resources + +Currently, when configuring resources that require a `location`, `region` or `zone` field you have the choice of specifying it in the resource block or allowing default values to be used. Default [region](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#region) or [zone](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#zone) values can be configured in the provider block or by providing values through environment variables. + +Changes in 5.0.0 make the way the provider handles `location`/`region`/`zone` values more consistent: + +* Resources that have a `location` field will now use the default `region` value preferentially over the default `zone` value set on the provider. This is only relevant to resources where `location` is not provided in the resource block directly. +* Previously, default `region` and `zone` values set as URIs were incompatible with resources that have `location` or `region` arguments. In 5.0.0+ those values will now be valid and won't result in errors during plan/apply stages. + + +#### When you may need to take action + +There is only one change that we anticipate can lead to unexpected diffs in Terraform plans after upgrading to 5.0.0, which is: + +> Resources that have a `location` field will now use the default `region` value preferentially over the default `zone` value set on the provider. This is only relevant to resources where `location` is not provided in the resource block directly. + +Users will need to check for unexpected `location` changes for resources. 
If an unexpected change is seen, the solution is to explicitly set the `location` value in that resource's configuration block to match the desired value. + +This will only affect users whose configuration contains resource blocks that have missing `location` values and whose [default zone](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#zone) value belongs to a region that's different than the [default region](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#region) value. For example, if you set `us-central1-a` as the default zone and `us-central2` as the default region on the provider you may see plans that contain unexpected diffs to move resources from `us-central1` to `us-central2`. + + ### Provider default values shown at plan-time `project`, `region`, and `zone` fields will now display their values during plan-time instead of the placeholder `(known after apply)` value normally displayed for fields without fixed Terraform default values. These values will be taken from either the Terraform resource config file, provider config, or local environment variables, depending on which variables are supplied by the user, matching the existing per-resource functionality for what default values are used in execution of a Terraform plan. +### Resource import formats have improved validation + +Throughout the provider there were many resources which erroneously gave false positives to poorly formatted import input if a subset of the provided input was valid to their configured import formats. All GCP resource IDs supplied to "terraform import" must match the documentation specified import formats exactly. + ## Datasources ### Datasources now error universally on 404 @@ -271,12 +311,14 @@ resource "google_firebaserules_ruleset" "firestore" { These two unsupported fields were introduced incorrectly. They are now removed. 
+ ## Resource: `google_cloud_run_v2_service` ### `liveness_probe.tcp_socket` is now removed This unsupported field was introduced incorrectly. It is now removed. + ## Resource: `google_container_cluster` ### Clusters created in error states are now tainted rather than deleted @@ -288,6 +330,50 @@ deleted. This behavior was changed to allow users to collect internal logs from the cluster and/or manually resolve the issues and untaint their failed clusters. +### `node_config.0.taint` and `node_pool.0.node_config.0.taint` field change + +The `taint` field has been changed to manage a subset of the taint keys on a node pool +and the `effective_taints` output field has been added to record the complete set of +taints applied to the node pool by GKE. + +Previously, the field was authoritative and would require every taint on the node pool +to be recorded, causing friction when users used GPUs or configured sandbox settings, +actions which added taints. After this change, only "Terraform-managed" taints will be +managed by the `taint` field. Other taints, including new taints injected by the +server, will not have drift detected. + +Currently, the set of managed taints and their values are immutable in Terraform, and +any changes will cause a recreate to be planned. However, taints can be unmanaged by +simultaneously removing the taint entry from GKE and your Terraform configuration at +the same time. + +The set of taints Terraform manages (and their values) will be determined based on +how the cluster or node pool resource was added to your Terraform state file: + +* If you created the cluster or node pool with Terraform with Google provider 5.0.0 +or later, the set of taints specified during resource creation will be managed. 
+* If you imported the cluster or node pool with Google provider 5.0.0 or later, no +taints will be managed by Terraform +* If you upgraded from an earlier version, the complete set of taint values applied to the +node pool at the time of your last refresh will be managed by Terraform + +Most existing configurations will not be affected with this change as they already specify +the whole set of managed taints, or are already ignoring changes with `lifecycle.ignore_changes`, +preventing a diff. + +A limited number of users may see a diff if they are using the `google-beta` provider +and have specified a `sandbox_config` value. If that's the case, you can safely add the +proposed value to configuration (below) or apply `lifecycle.ignore_changes` to the field to resolve. + + +```diff ++ taint { ++ key = "sandbox.gke.io/runtime" ++ value = "gvisor" ++ effect = "NO_SCHEDULE" ++ } +``` + ### `enable_binary_authorization` is now removed `enable_binary_authorization` has been removed in favor of `binary_authorization.enabled`. @@ -297,6 +383,60 @@ cluster and/or manually resolve the issues and untaint their failed clusters. Previously `network_policy.provider` defaulted to "PROVIDER_UNSPECIFIED". It no longer has a default value. +## Resource: `google_container_node_pool` + +### `logging_variant` no longer has a provider default value + +Previously `logging_variant` defaulted to "DEFAULT". It no longer has a default value. + +### `management.auto_repair` and `management.auto_upgrade` now default to true + +Previously both fields defaulted to false. They now default to true. + +### `node_config.0.taint` field change + +The `taint` field has been changed to manage a subset of the taint keys on a node pool +and the `effective_taints` output field has been added to record the complete set of +taints applied to the node pool by GKE. 
+ +Previously, the field was authoritative and would require every taint on the node pool +to be recorded, causing friction when users used GPUs or configured sandbox settings, +actions which added taints. After this change, only "Terraform-managed" taints will be +managed by the `taint` field. Other taints, including new taints injected by the +server, will not have drift detected. + +Currently, the set of managed taints and their values are immutable in Terraform, and +any changes will cause a recreate to be planned. However, taints can be unmanaged by +simultaneously removing the taint entry from GKE and your Terraform configuration at +the same time. + +The set of taints Terraform manages (and their values) will be determined based on +how the cluster or node pool resource was added to your Terraform state file: + +* If you created the cluster or node pool with Terraform with Google provider 5.0.0 +or later, the set of taints specified during resource creation will be managed. +* If you imported the cluster or node pool with Google provider 5.0.0 or later, no +taints will be managed by Terraform +* If you upgraded from an earlier version, the complete set of taint values applied to the +node pool at the time of your last refresh will be managed by Terraform + +Most existing configurations will not be affected with this change as they already specify +the whole set of managed taints, or are already ignoring changes with `lifecycle.ignore_changes`, +preventing a diff. + +A limited number of users may see a diff if they are using the `google-beta` provider +and have specified a `sandbox_config` value. If that's the case, you can safely add the +proposed value to configuration (below) or apply `lifecycle.ignore_changes` to the field to resolve. 
+ + +```diff ++ taint { ++ key = "sandbox.gke.io/runtime" ++ value = "gvisor" ++ effect = "NO_SCHEDULE" ++ } +``` + ## Resource: `google_dataplex_datascan` ### `dataQualityResult` and `dataProfileResult` output fields are now removed @@ -312,7 +452,6 @@ it will use the default value from the API which is `FALSE`. If you want to enable endpoint independent mapping, then explicity set the value of `enable_endpoint_independent_mapping` field to `TRUE`. - ## Resource: `google_firebase_project_location` ### `google_firebase_project_location` is now removed @@ -377,12 +516,28 @@ resource "google_firestore_database" "default" { ### `deletion_policy` now defaults to `DELETE` Previously, `google_firebase_web_app` deletions default to `ABANDON`, which means to only stop tracking the WebApp in Terraform. The actual app is not deleted from the Firebase project. If you are relying on this behavior, set `deletion_policy` to `ABANDON` explicitly in the new version. + ## Resource: `google_compute_autoscaler` (beta) ### `metric.filter` now defaults to `resource.type = gce_instance` Previously, `metric.filter` doesn't have the defult value and causes a UI error. +## Resource: `google_monitoring_dashboard` + +### `dashboard_json` suppresses removal diffs more aggressively + +To prevent permanent diffs from default values, Terraform will now attempt to suppress diffs where the value is returned in the JSON +string but doesn't exist in the configuration. Consequently, legitimate remove-only diffs will also be suppressed. +For Terraform to detect the diff, JSON key removals must also be accompanied by a non-removal change (trivial or not). + +## Resource: `google_monitoring_metric_descriptor` + +### Changing `labels` now triggers replacement + +Previously, attempting to update `labels` failed and created a permadiff. The `labels` +field is now immutable without destroying and recreating the resource. 
+ ## Resource: `google_privateca_certificate` ### `config_values` is now removed @@ -490,6 +645,10 @@ If you were relying on accessing an individual flag by index (for example, `goog `google_service_networking_connection` now uses the Create endpoint instead of the Patch endpoint during the creation step. Previously, Patch was used as a workaround for an issue that has since been resolved. +### "terraform destroy" now fully deletes the resource instead of abandoning + +`google_service_networking_connection` now uses API `deleteConnection` method instead of `removePeering` method during the deletion step. Previously, `removePeering` method was used because `deleteConnection` method was unavailable. In some cases a private connection cannot be deleted immediately after the resource using that connection is deleted, and users may have to delete the private connection after a waiting period. + ## Resource: `google_secret_manager_secret` ### `replication.automatic` is now removed @@ -520,14 +679,50 @@ resource "google_secret_manager_secret" "my-secret" { } ``` -## Resource: `google_identity_platform_project_default_config` - -### `google_identity_platform_project_default_config` has been removed from the provider - -Use the `google_identity_platform_config` resource instead. It contains a more comprehensive list of fields, and was created before `google_identity_platform_project_default_config` was added. - ## Resource: `google_compute_service_attachment` ### `reconcile_connections` now defaults from API `reconcile_connections` previously defaults to true. Now it will default from the API. + +### Retyped `consumer_accept_lists` to SET from ARRAY + +Previously, `consumer_accept_lists` was a list, making it order-dependent. It is now a set. 
+ +If you were relying on accessing an individual flag by index (for example, `google_compute_service_attachment.consumer_accept_lists.0.project_id_or_num`), then that will now need to be by hash (for example, `google_compute_service_attachment.consumer_accept_lists..project_id_or_num`). + +## Resource: `google_dataflow_flex_template_job` + +### Fields that are a part of the [environment block](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#FlexTemplateRuntimeEnvironment) will be overridden to be sent via their fields even when supplied via parameters. + +Several fields within the `google_dataflow_flex_template_job` resource can be supplied through either the `parameters{}` block or a field on the resource object. Support for these fields on the resource object was added in the `4.66.0` release of the Google provider. That version introduced an issue where the values were being double-sent to the API due to being recorded in Terraform state in two places. To resolve this issue, these fields will be deduplicated and sent to the API through the resource object. + +Additionally, due to the API returning these fields to the user they will now be considered computed and users will see values twice within their state when configuring these fields' values via the `parameters{}` block. + +## Resource: `google_compute_node_group` + +### Node groups are now mutable + +Due to limitations in previous field configurations, the only field that could be updated previously was `node_template`. It is now possible to adjust the `autoscaling_policy` without recreating the group, nor will any adjustment to the `size` of the nodepool prompt resource recreation. + +### `size` is now an output only field. 
+ +`size` previously served as an alias for `initial_size` on resource creation, and users would be required to recreate the resource if the `size` value ever adjusted due to either direct user update or auto-scaling adjustment outside of Terraform. + +It will now mirror its API functionality and serve as an output only field to show how many nodes currently exist within the resource. All existing configurations which used `size` as an input field must be updated for its removal. + +### One of `initial_size` or `autoscaling_policy{}` must be configured on resource creation. + +These fields will supply the base node-count for a node group and one of them will be required for successful resource creation. Both will be freely updateable or removable on future state changes that do not require recreation. + +## Resource: `google_looker_instance` + +### `LOOKER_MODELER` has been removed as a platform edition. + +Looker Modeler edition is deprecated as a platform edition. + +Deprecated in favor of field `pem_certificate_chain`. It is now removed. + +## Resource: `google_gkeonprem_bare_metal_admin_cluster` + +Delete operation is disabled. The command `terraform destroy` maps to no-op. Users need to delete resource manually. Please refer to the [user guide](https://cloud.google.com/anthos/clusters/docs/bare-metal/latest/how-to/reset-nodes) for the instructions of cluster deletion. 
diff --git a/website/docs/r/alloydb_backup.html.markdown b/website/docs/r/alloydb_backup.html.markdown index fcc7a6f3f87..21c682fb52e 100644 --- a/website/docs/r/alloydb_backup.html.markdown +++ b/website/docs/r/alloydb_backup.html.markdown @@ -92,6 +92,7 @@ resource "google_alloydb_backup" "default" { cluster_name = google_alloydb_cluster.default.name description = "example description" + type = "ON_DEMAND" labels = { "label" = "key" } @@ -152,11 +153,18 @@ The following arguments are supported: - - - +* `display_name` - + (Optional) + User-settable and human-readable display name for the Backup. + * `labels` - (Optional) - User-defined labels for the alloydb backup. - **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - Please refer to the field `effective_labels` for all of the labels present on the resource. + User-defined labels for the alloydb backup. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + +* `type` - + (Optional) + The backup type, which suggests the trigger for the backup. + Possible values are: `TYPE_UNSPECIFIED`, `ON_DEMAND`, `AUTOMATED`, `CONTINUOUS`. * `description` - (Optional) @@ -167,6 +175,11 @@ The following arguments are supported: EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). Structure is [documented below](#nested_encryption_config). +* `annotations` - + (Optional) + Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
@@ -190,30 +203,45 @@ In addition to the arguments listed above, the following computed attributes are Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. * `create_time` - - Time the Backup was created in UTC. + Output only. Create time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". * `update_time` - - Time the Backup was updated in UTC. + Output only. Update time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `delete_time` - + Output only. Delete time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". * `state` - - The current state of the backup. + Output only. The current state of the backup. -* `reconciling` - - If true, indicates that the service is actively updating the resource. This can happen due to user-triggered updates or system actions like failover or maintenance. +* `cluster_uid` - + Output only. The system-generated UID of the cluster which was used to create this resource. -* `etag` - - A hash of the resource. +* `reconciling` - + Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. + This can happen due to user-triggered updates or system actions like failover or maintenance. * `encryption_info` - EncryptionInfo describes the encryption information of a cluster or a backup. Structure is [documented below](#nested_encryption_info). -* `terraform_labels` - - The combination of labels configured directly on the resource - and default labels configured on the provider. 
+* `etag` - + For Resource freshness validation (https://google.aip.dev/154) + +* `size_bytes` - + Output only. The size of the backup in bytes. -* `effective_labels` - - All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services. +* `expiry_time` - + Output only. The time after which the backup is eligible to be garbage collected. + It is the duration specified by the backup's retention policy, added to the backup's createTime. + +* `expiry_quantity` - + Output only. The QuantityBasedExpiry of the backup, specified by the backup's retention policy. + Once the expiry quantity is over retention, the backup is eligible to be garbage collected. + Structure is [documented below](#nested_expiry_quantity). The `encryption_info` block contains: @@ -226,6 +254,16 @@ In addition to the arguments listed above, the following computed attributes are (Output) Output only. Cloud KMS key versions that are being used to protect the database or the backup. +The `expiry_quantity` block contains: + +* `retention_count` - + (Output) + Output only. The backup's position among its backups with the same source cluster and type, by descending chronological order of create time (i.e. newest first). + +* `total_retention_count` - + (Output) + Output only. The length of the quantity-based queue, specified by the backup's retention policy. + ## Timeouts This resource provides the following diff --git a/website/docs/r/alloydb_cluster.html.markdown b/website/docs/r/alloydb_cluster.html.markdown index 8f141815eb8..bf3a450eaf7 100644 --- a/website/docs/r/alloydb_cluster.html.markdown +++ b/website/docs/r/alloydb_cluster.html.markdown @@ -232,6 +232,15 @@ The following arguments are supported: (Optional) User-settable and human-readable display name for the Cluster. 
+* `etag` - + (Optional) + For Resource freshness validation (https://google.aip.dev/154) + +* `annotations` - + (Optional) + Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + * `initial_user` - (Optional) Initial user to setup during cluster creation. @@ -441,6 +450,14 @@ In addition to the arguments listed above, the following computed attributes are ContinuousBackupInfo describes the continuous backup properties of a cluster. Structure is [documented below](#nested_continuous_backup_info). +* `reconciling` - + Output only. Reconciling (https://google.aip.dev/128#reconciliation). + Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. + This can happen due to user-triggered updates or system actions like failover or maintenance. + +* `state` - + Output only. The current serving state of the cluster. + * `database_version` - The database engine major version. This is an output-only field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation. diff --git a/website/docs/r/alloydb_instance.html.markdown b/website/docs/r/alloydb_instance.html.markdown index ac7e20c0a0c..e5bb5c3ccdd 100644 --- a/website/docs/r/alloydb_instance.html.markdown +++ b/website/docs/r/alloydb_instance.html.markdown @@ -135,6 +135,11 @@ The following arguments are supported: can have regional availability (nodes are present in 2 or more zones in a region).' Possible values are: `AVAILABILITY_TYPE_UNSPECIFIED`, `ZONAL`, `REGIONAL`. +* `query_insights_config` - + (Optional) + Configuration for query insights. + Structure is [documented below](#nested_query_insights_config). + * `read_pool_config` - (Optional) Read pool specific config. 
If the instance type is READ_POOL, this configuration must be provided. @@ -146,6 +151,24 @@ The following arguments are supported: Structure is [documented below](#nested_machine_config). +The `query_insights_config` block supports: + +* `query_string_length` - + (Optional) + Query string length. The default value is 1024. Any integer between 256 and 4500 is considered valid. + +* `record_application_tags` - + (Optional) + Record application tags for an instance. This flag is turned "on" by default. + +* `record_client_address` - + (Optional) + Record client address for an instance. Client address is PII information. This flag is turned "on" by default. + +* `query_plans_per_minute` - + (Optional) + Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 20 is considered valid. + The `read_pool_config` block supports: * `node_count` - diff --git a/website/docs/r/artifact_registry_repository.html.markdown b/website/docs/r/artifact_registry_repository.html.markdown index 442b5ac0393..32df96f0e5f 100644 --- a/website/docs/r/artifact_registry_repository.html.markdown +++ b/website/docs/r/artifact_registry_repository.html.markdown @@ -147,6 +147,58 @@ resource "google_artifact_registry_repository" "my-repo" { } } ``` + +## Example Usage - Artifact Registry Repository Remote Apt + + +```hcl +resource "google_artifact_registry_repository" "my-repo" { + location = "us-central1" + repository_id = "debian-buster" + description = "example remote apt repository" + format = "APT" + mode = "REMOTE_REPOSITORY" + remote_repository_config { + description = "Debian buster remote repository" + apt_repository { + public_repository { + repository_base = "DEBIAN" + repository_path = "debian/dists/buster" + } + } + } +} +``` + +## Example Usage - Artifact Registry Repository Remote Yum + + +```hcl +resource "google_artifact_registry_repository" "my-repo" { + location = "us-central1" + repository_id = 
"centos-8" + description = "example remote yum repository" + format = "YUM" + mode = "REMOTE_REPOSITORY" + remote_repository_config { + description = "Centos 8 remote repository" + yum_repository { + public_repository { + repository_base = "CENTOS" + repository_path = "8-stream/BaseOs/x86_64/os" + } + } + } +} +```
Open in Cloud Shell @@ -392,6 +444,11 @@ The following arguments are supported: (Optional) The description of the remote source. +* `apt_repository` - + (Optional) + Specific settings for an Apt remote repository. + Structure is [documented below](#nested_apt_repository). + * `docker_repository` - (Optional) Specific settings for a Docker remote repository. @@ -412,6 +469,30 @@ The following arguments are supported: Specific settings for a Python remote repository. Structure is [documented below](#nested_python_repository). +* `yum_repository` - + (Optional) + Specific settings for a Yum remote repository. + Structure is [documented below](#nested_yum_repository). + + +The `apt_repository` block supports: + +* `public_repository` - + (Optional) + One of the publicly available Apt repositories supported by Artifact Registry. + Structure is [documented below](#nested_public_repository). + + +The `public_repository` block supports: + +* `repository_base` - + (Required) + A common public repository base for Apt. + Possible values are: `DEBIAN`, `UBUNTU`. + +* `repository_path` - + (Required) + Specific repository from the base, e.g. `"debian/dists/buster"`. The `docker_repository` block supports: @@ -445,6 +526,25 @@ The following arguments are supported: Default value is `PYPI`. Possible values are: `PYPI`. +The `yum_repository` block supports: + +* `public_repository` - + (Optional) + One of the publicly available Yum repositories supported by Artifact Registry. + Structure is [documented below](#nested_public_repository). + + +The `public_repository` block supports: + +* `repository_base` - + (Required) + A common public repository base for Yum. + Possible values are: `CENTOS`, `CENTOS_DEBUG`, `CENTOS_VAULT`, `CENTOS_STREAM`, `ROCKY`, `EPEL`. + +* `repository_path` - + (Required) + Specific repository from the base, e.g. 
`"8-stream/BaseOs/x86_64/os"` + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/bigtable_instance.html.markdown b/website/docs/r/bigtable_instance.html.markdown index 9f64f4873d6..b0176072208 100644 --- a/website/docs/r/bigtable_instance.html.markdown +++ b/website/docs/r/bigtable_instance.html.markdown @@ -150,6 +150,7 @@ If no value is set, Cloud Bigtable automatically allocates nodes based on your d In addition to the arguments listed above, the following computed attributes are exported: * `id` - an identifier for the resource with format `projects/{{project}}/instances/{{name}}` +* `cluster.0.state` - describes the current state of the cluster. ## Timeouts diff --git a/website/docs/r/compute_instance.html.markdown b/website/docs/r/compute_instance.html.markdown index 965f0d80a9d..7ad77392768 100644 --- a/website/docs/r/compute_instance.html.markdown +++ b/website/docs/r/compute_instance.html.markdown @@ -244,7 +244,7 @@ is desired, you will need to modify your state file manually using For instance, the image `centos-6-v20180104` includes its family name `centos-6`. These images can be referred by family name here. -* `labels` - (Optional) A set of key/value label pairs assigned to the disk. This +* `labels` - (Optional) A set of key/value label pairs assigned to the disk. This field is only applicable for persistent disks. * `resource_manager_tags` - (Optional) A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource. @@ -293,6 +293,7 @@ is desired, you will need to modify your state file manually using network is in auto subnet mode, specifying the subnetwork is optional. 
If the network is in custom subnet mode, specifying the subnetwork is required. + * `subnetwork_project` - (Optional) The project in which the subnetwork belongs. If the `subnetwork` is a self_link, this field is ignored in favor of the project defined in the subnetwork self_link. If the `subnetwork` is a name and this @@ -314,6 +315,8 @@ is desired, you will need to modify your state file manually using * `nic_type` - (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. +* `network_attachment` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The URL of the network attachment that this interface should connect to in the following format: `projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}`. + * `stack_type` - (Optional) The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6 or IPV4_ONLY. If not specified, IPV4_ONLY will be used. * `ipv6_access_config` - (Optional) An array of IPv6 access configurations for this interface. @@ -322,6 +325,7 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `queue_count` - (Optional) The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. +* `security_policy` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. The `access_config` block supports: @@ -339,14 +343,14 @@ specified, then this instance will have no external IPv6 Internet access. 
Struct The `ipv6_access_config` block supports: -* `external_ipv6` - (Optional) The first IPv6 address of the external IPv6 range associated - with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. - To use a static external IP address, it must be unused and in the same region as the instance's zone. +* `external_ipv6` - (Optional) The first IPv6 address of the external IPv6 range associated + with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. + To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. * `external_ipv6_prefix_length` - (Optional) The prefix length of the external IPv6 range. -* `name` - (Optional) The name of this access configuration. In ipv6AccessConfigs, the recommended name +* `name` - (Optional) The name of this access configuration. In ipv6AccessConfigs, the recommended name is "External IPv6". * `network_tier` - (Optional) The service-level to be provided for IPv6 traffic when the @@ -398,12 +402,12 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `min_node_cpus` - (Optional) The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. -* `provisioning_model` - (Optional) Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, +* `provisioning_model` - (Optional) Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, `preemptible` should be `true` and `automatic_restart` should be `false`. 
For more info about `SPOT`, read [here](https://cloud.google.com/compute/docs/instances/spot) - -* `instance_termination_action` - (Optional) Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) + +* `instance_termination_action` - (Optional) Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) * `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Only support `DELETE` `instance_termination_action` at this point. Structure is [documented below](#nested_max_run_duration). The `max_run_duration` block supports: diff --git a/website/docs/r/compute_router_nat.html.markdown b/website/docs/r/compute_router_nat.html.markdown index 075dbd2ab21..ff73e607741 100644 --- a/website/docs/r/compute_router_nat.html.markdown +++ b/website/docs/r/compute_router_nat.html.markdown @@ -171,6 +171,84 @@ resource "google_compute_router_nat" "nat_rules" { enable_endpoint_independent_mapping = false } ``` +## Example Usage - Router Nat Private + + +```hcl +resource "google_compute_network" "net" { + provider = google-beta + + name = "my-network" +} + +resource "google_compute_subnetwork" "subnet" { + provider = google-beta + + name = "my-subnetwork" + network = google_compute_network.net.id + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + purpose = "PRIVATE_NAT" +} + +resource "google_compute_router" "router" { + provider = google-beta + + name = "my-router" + region = google_compute_subnetwork.subnet.region + network = google_compute_network.net.id +} + +resource "google_network_connectivity_hub" "hub" { + provider = google-beta + + name = "my-hub" + 
description = "vpc hub for inter vpc nat" +} + +resource "google_network_connectivity_spoke" "spoke" { + provider = google-beta + + name = "my-spoke" + location = "global" + description = "vpc spoke for inter vpc nat" + hub = google_network_connectivity_hub.hub.id + linked_vpc_network { + exclude_export_ranges = [ + "198.51.100.0/24", + "10.10.0.0/16" + ] + uri = google_compute_network.net.self_link + } +} + +resource "google_compute_router_nat" "nat_type" { + provider = google-beta + + name = "my-router-nat" + router = google_compute_router.router.name + region = google_compute_router.router.region + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + enable_dynamic_port_allocation = false + enable_endpoint_independent_mapping = false + min_ports_per_vm = 32 + type = "PRIVATE" + subnetwork { + name = google_compute_subnetwork.subnet.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + rules { + rule_number = 100 + description = "rule for private nat" + match = "nexthop.hub == \"//networkconnectivity.googleapis.com/projects/acm-test-proj-123/locations/global/hubs/my-hub\"" + action { + source_nat_active_ranges = [ + google_compute_subnetwork.subnet.self_link + ] + } + } +} +``` ## Argument Reference @@ -182,13 +260,6 @@ The following arguments are supported: Name of the NAT service. The name must be 1-63 characters long and comply with RFC1035. -* `nat_ip_allocate_option` - - (Required) - How external IPs should be allocated for this NAT. Valid values are - `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud - Platform, or `MANUAL_ONLY` for only user-allocated NAT IP addresses. - Possible values are: `MANUAL_ONLY`, `AUTO_ONLY`. - * `source_subnetwork_ip_ranges_to_nat` - (Required) How NAT should be configured per Subnetwork. @@ -211,6 +282,13 @@ The following arguments are supported: - - - +* `nat_ip_allocate_option` - + (Optional) + How external IPs should be allocated for this NAT. 
Valid values are + `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud + Platform, or `MANUAL_ONLY` for only user-allocated NAT IP addresses. + Possible values are: `MANUAL_ONLY`, `AUTO_ONLY`. + * `nat_ips` - (Optional) Self-links of NAT IPs. Only valid if natIpAllocateOption @@ -283,6 +361,15 @@ The following arguments are supported: Enable endpoint independent mapping. For more information see the [official documentation](https://cloud.google.com/nat/docs/overview#specs-rfcs). +* `type` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + Indicates whether this NAT is used for public or private IP translation. + If unspecified, it defaults to PUBLIC. + If `PUBLIC` NAT used for public IP translation. + If `PRIVATE` NAT used for private IP translation. + Default value is `PUBLIC`. + Possible values are: `PUBLIC`, `PRIVATE`. + * `region` - (Optional) Region where the router and NAT reside. @@ -364,6 +451,18 @@ The following arguments are supported: These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. +* `source_nat_active_ranges` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + A list of URLs of the subnetworks used as source ranges for this NAT Rule. + These subnetworks must have purpose set to PRIVATE_NAT. + This field is used for private NAT. + +* `source_nat_drain_ranges` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + A list of URLs of subnetworks representing source ranges to be drained. + This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. + This field is used for private NAT. 
+ ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/compute_subnetwork.html.markdown b/website/docs/r/compute_subnetwork.html.markdown index 4cc035bc752..902cd234073 100644 --- a/website/docs/r/compute_subnetwork.html.markdown +++ b/website/docs/r/compute_subnetwork.html.markdown @@ -183,6 +183,32 @@ resource "google_compute_network" "custom-test" { enable_ula_internal_ipv6 = true } ``` + +## Example Usage - Subnetwork Purpose Private Nat + + +```hcl +resource "google_compute_subnetwork" "subnetwork-purpose-private-nat" { + provider = google-beta + + name = "subnet-purpose-test-subnetwork" + region = "us-west2" + ip_cidr_range = "192.168.1.0/24" + purpose = "PRIVATE_NAT" + network = google_compute_network.custom-test.id +} + +resource "google_compute_network" "custom-test" { + provider = google-beta + + name = "subnet-purpose-test-network" + auto_create_subnetworks = false +} +``` ## Argument Reference @@ -223,10 +249,11 @@ The following arguments are supported: * `purpose` - (Optional) - The purpose of the resource. This field can be either `PRIVATE_RFC_1918`, `REGIONAL_MANAGED_PROXY`, `GLOBAL_MANAGED_PROXY`, or `PRIVATE_SERVICE_CONNECT`. + The purpose of the resource. This field can be either `PRIVATE_RFC_1918`, `REGIONAL_MANAGED_PROXY`, `GLOBAL_MANAGED_PROXY`, `PRIVATE_SERVICE_CONNECT` or `PRIVATE_NAT`([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)). A subnet with purpose set to `REGIONAL_MANAGED_PROXY` is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnetwork in a given region with purpose set to `GLOBAL_MANAGED_PROXY` is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. A subnetwork with purpose set to `PRIVATE_SERVICE_CONNECT` reserves the subnet for hosting a Private Service Connect published service. 
+ A subnetwork with purpose set to `PRIVATE_NAT` is used as source range for Private NAT gateways. Note that `REGIONAL_MANAGED_PROXY` is the preferred setting for all regional Envoy load balancers. If unspecified, the purpose defaults to `PRIVATE_RFC_1918`. diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index f9d4b4cecbc..2ad540829e0 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -413,7 +413,9 @@ Enable/Disable Security Posture API features for the cluster. Structure is [docu * `gcs_fuse_csi_driver_config` - (Optional) The status of the GCSFuse CSI driver addon, which allows the usage of a gcs bucket as volumes. - It is disabled by default; set `enabled = true` to enable. + It is disabled by default for Standard clusters; set `enabled = true` to enable. + It is enabled by default for Autopilot clusters with version 1.24 or later; set `enabled = true` to enable it explicitly. + See [Enable the Cloud Storage FUSE CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/cloud-storage-fuse-csi-driver#enable) for more information. * `cloudrun_config` - (Optional). Structure is [documented below](#nested_cloudrun_config). diff --git a/website/docs/r/google_folder.html.markdown b/website/docs/r/google_folder.html.markdown index 7d3b5c9b8e2..91d3e4a4ec6 100644 --- a/website/docs/r/google_folder.html.markdown +++ b/website/docs/r/google_folder.html.markdown @@ -52,6 +52,7 @@ In addition to the arguments listed above, the following computed attributes are exported: * `name` - The resource name of the Folder. Its format is folders/{folder_id}. +* `folder_id` - The folder id from the name "folders/{folder_id}" * `lifecycle_state` - The lifecycle state of the folder such as `ACTIVE` or `DELETE_REQUESTED`. * `create_time` - Timestamp when the Folder was created. Assigned by the server. 
A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z". diff --git a/website/docs/r/os_config_patch_deployment.html.markdown b/website/docs/r/os_config_patch_deployment.html.markdown index 82f064b83d4..7360c240b43 100644 --- a/website/docs/r/os_config_patch_deployment.html.markdown +++ b/website/docs/r/os_config_patch_deployment.html.markdown @@ -284,6 +284,7 @@ resource "google_os_config_patch_deployment" "patch" { week_day_of_month { week_ordinal = -1 day_of_week = "TUESDAY" + day_offset = 3 } } } @@ -801,6 +802,10 @@ The following arguments are supported: A day of the week. Possible values are: `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. +* `day_offset` - + (Optional) + Represents the number of days before or after the given week day of month that the patch deployment is scheduled for. + The `rollout` block supports: * `mode` - diff --git a/website/docs/r/pubsub_schema.html.markdown b/website/docs/r/pubsub_schema.html.markdown index b27be906786..2e7073e4ac6 100644 --- a/website/docs/r/pubsub_schema.html.markdown +++ b/website/docs/r/pubsub_schema.html.markdown @@ -89,7 +89,11 @@ The following arguments are supported: (Optional) The definition of the schema. This should contain a string representing the full definition of the schema - that is a valid schema definition of the type specified in type. + that is a valid schema definition of the type specified in type. Changes + to the definition commit new [schema revisions](https://cloud.google.com/pubsub/docs/commit-schema-revision). + A schema can only have up to 20 revisions, so updates that fail with an + error indicating that the limit has been reached require manually + [deleting old revisions](https://cloud.google.com/pubsub/docs/delete-schema-revision). * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
diff --git a/website/docs/r/scc_folder_custom_module.html.markdown b/website/docs/r/scc_folder_custom_module.html.markdown new file mode 100644 index 00000000000..dd5db00a9f1 --- /dev/null +++ b/website/docs/r/scc_folder_custom_module.html.markdown @@ -0,0 +1,286 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Security Command Center (SCC)" +description: |- + Represents an instance of a Security Health Analytics custom module, including + its full module name, display name, enablement state, and last updated time. +--- + +# google\_scc\_folder\_custom\_module + +Represents an instance of a Security Health Analytics custom module, including +its full module name, display name, enablement state, and last updated time. +You can create a custom module at the organization, folder, or project level. +Custom modules that you create at the organization or folder level are inherited +by the child folders and projects. 
+ + +To get more information about FolderCustomModule, see: + +* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1/folders.securityHealthAnalyticsSettings.customModules) +* How-to Guides + * [Overview of custom modules for Security Health Analytics](https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview) + +## Example Usage - Scc Folder Custom Module Basic + + +```hcl +resource "google_folder" "folder" { + parent = "organizations/123456789" + display_name = "folder-name" +} + +resource "google_scc_folder_custom_module" "example" { + folder = google_folder.folder.folder_id + display_name = "basic_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} +``` +## Example Usage - Scc Folder Custom Module Full + + +```hcl +resource "google_folder" "folder" { + parent = "organizations/123456789" + display_name = "folder-name" +} + +resource "google_scc_folder_custom_module" "example" { + folder = google_folder.folder.folder_id + display_name = "full_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `display_name` - + (Required) + The display name of the Security Health Analytics custom module. This + display name becomes the finding category for all findings that are + returned by this custom module. The display name must be between 1 and + 128 characters, start with a lowercase letter, and contain alphanumeric + characters or underscores only. + +* `enablement_state` - + (Required) + The enablement state of the custom module. + Possible values are: `ENABLED`, `DISABLED`. + +* `custom_config` - + (Required) + The user specified custom configuration for the module. + Structure is [documented below](#nested_custom_config). + +* `folder` - + (Required) + Numerical ID of the parent folder. + + +The `custom_config` block supports: + +* `predicate` - + (Required) + The CEL expression to evaluate to produce findings. 
When the expression evaluates + to true against a resource, a finding is generated. + Structure is [documented below](#nested_predicate). + +* `custom_output` - + (Optional) + Custom output properties. + Structure is [documented below](#nested_custom_output). + +* `resource_selector` - + (Required) + The resource types that the custom module operates on. Each custom module + can specify up to 5 resource types. + Structure is [documented below](#nested_resource_selector). + +* `severity` - + (Required) + The severity to assign to findings generated by the module. + Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. + +* `description` - + (Optional) + Text that describes the vulnerability or misconfiguration that the custom + module detects. This explanation is returned with each finding instance to + help investigators understand the detected issue. The text must be enclosed in quotation marks. + +* `recommendation` - + (Required) + An explanation of the recommended steps that security teams can take to resolve + the detected issue. This explanation is returned with each finding generated by + this module in the nextSteps property of the finding JSON. + + +The `predicate` block supports: + +* `expression` - + (Required) + Textual representation of an expression in Common Expression Language syntax. + +* `title` - + (Optional) + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + +* `description` - + (Optional) + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + +* `location` - + (Optional) + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + +The `custom_output` block supports: + +* `properties` - + (Optional) + A list of custom output properties to add to the finding. + Structure is [documented below](#nested_properties). 
+ + +The `properties` block supports: + +* `name` - + (Optional) + Name of the property for the custom output. + +* `value_expression` - + (Optional) + The CEL expression for the custom output. A resource property can be specified + to return the value of the property or a text string enclosed in quotation marks. + Structure is [documented below](#nested_value_expression). + + +The `value_expression` block supports: + +* `expression` - + (Required) + Textual representation of an expression in Common Expression Language syntax. + +* `title` - + (Optional) + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + +* `description` - + (Optional) + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + +* `location` - + (Optional) + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + +The `resource_selector` block supports: + +* `resource_types` - + (Required) + The resource types to run the detector on. + +- - - + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}}` + +* `name` - + The resource name of the custom module. Its format is "folders/{folder_id}/securityHealthAnalyticsSettings/customModules/{customModule}". + The id {customModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits. + +* `update_time` - + The time at which the custom module was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `last_editor` - + The editor that last updated the custom module. 
+ +* `ancestor_module` - + If empty, indicates that the custom module was created in the organization, folder, + or project in which you are viewing the custom module. Otherwise, ancestor_module + specifies the organization or folder from which the custom module is inherited. + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +FolderCustomModule can be imported using any of these accepted formats: + +``` +$ terraform import google_scc_folder_custom_module.default folders/{{folder}}/securityHealthAnalyticsSettings/customModules/{{name}} +$ terraform import google_scc_folder_custom_module.default {{folder}}/{{name}} +``` diff --git a/website/docs/r/scc_organization_custom_module.html.markdown b/website/docs/r/scc_organization_custom_module.html.markdown new file mode 100644 index 00000000000..c18adfb4230 --- /dev/null +++ b/website/docs/r/scc_organization_custom_module.html.markdown @@ -0,0 +1,276 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Security Command Center (SCC)" +description: |- + Represents an instance of a Security Health Analytics custom module, including + its full module name, display name, enablement state, and last updated time. 
+--- + +# google\_scc\_organization\_custom\_module + +Represents an instance of a Security Health Analytics custom module, including +its full module name, display name, enablement state, and last updated time. +You can create a custom module at the organization, folder, or project level. +Custom modules that you create at the organization or folder level are inherited +by the child folders and projects. + + +To get more information about OrganizationCustomModule, see: + +* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v1/organizations.securityHealthAnalyticsSettings.customModules) +* How-to Guides + * [Overview of custom modules for Security Health Analytics](https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview) + +## Example Usage - Scc Organization Custom Module Basic + + +```hcl +resource "google_scc_organization_custom_module" "example" { + organization = "123456789" + display_name = "basic_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} +``` +## Example Usage - Scc Organization Custom Module Full + + +```hcl +resource "google_scc_organization_custom_module" "example" { + organization = "123456789" + display_name = "full_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `display_name` - + (Required) + The display name of the Security Health Analytics custom module. This + display name becomes the finding category for all findings that are + returned by this custom module. The display name must be between 1 and + 128 characters, start with a lowercase letter, and contain alphanumeric + characters or underscores only. + +* `enablement_state` - + (Required) + The enablement state of the custom module. + Possible values are: `ENABLED`, `DISABLED`. + +* `custom_config` - + (Required) + The user specified custom configuration for the module. + Structure is [documented below](#nested_custom_config). + +* `organization` - + (Required) + Numerical ID of the parent organization. + + +The `custom_config` block supports: + +* `predicate` - + (Required) + The CEL expression to evaluate to produce findings. When the expression evaluates + to true against a resource, a finding is generated. 
+ Structure is [documented below](#nested_predicate). + +* `custom_output` - + (Optional) + Custom output properties. + Structure is [documented below](#nested_custom_output). + +* `resource_selector` - + (Required) + The resource types that the custom module operates on. Each custom module + can specify up to 5 resource types. + Structure is [documented below](#nested_resource_selector). + +* `severity` - + (Required) + The severity to assign to findings generated by the module. + Possible values are: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`. + +* `description` - + (Optional) + Text that describes the vulnerability or misconfiguration that the custom + module detects. This explanation is returned with each finding instance to + help investigators understand the detected issue. The text must be enclosed in quotation marks. + +* `recommendation` - + (Required) + An explanation of the recommended steps that security teams can take to resolve + the detected issue. This explanation is returned with each finding generated by + this module in the nextSteps property of the finding JSON. + + +The `predicate` block supports: + +* `expression` - + (Required) + Textual representation of an expression in Common Expression Language syntax. + +* `title` - + (Optional) + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + +* `description` - + (Optional) + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + +* `location` - + (Optional) + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + +The `custom_output` block supports: + +* `properties` - + (Optional) + A list of custom output properties to add to the finding. + Structure is [documented below](#nested_properties). 
+ + +The `properties` block supports: + +* `name` - + (Optional) + Name of the property for the custom output. + +* `value_expression` - + (Optional) + The CEL expression for the custom output. A resource property can be specified + to return the value of the property or a text string enclosed in quotation marks. + Structure is [documented below](#nested_value_expression). + + +The `value_expression` block supports: + +* `expression` - + (Required) + Textual representation of an expression in Common Expression Language syntax. + +* `title` - + (Optional) + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + +* `description` - + (Optional) + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + +* `location` - + (Optional) + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + +The `resource_selector` block supports: + +* `resource_types` - + (Required) + The resource types to run the detector on. + +- - - + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}}` + +* `name` - + The resource name of the custom module. Its format is "organizations/{org_id}/securityHealthAnalyticsSettings/customModules/{customModule}". + The id {customModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits. + +* `update_time` - + The time at which the custom module was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ +* `last_editor` - + The editor that last updated the custom module. + +* `ancestor_module` - + If empty, indicates that the custom module was created in the organization, folder, + or project in which you are viewing the custom module. Otherwise, ancestor_module + specifies the organization or folder from which the custom module is inherited. + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +OrganizationCustomModule can be imported using any of these accepted formats: + +``` +$ terraform import google_scc_organization_custom_module.default organizations/{{organization}}/securityHealthAnalyticsSettings/customModules/{{name}} +$ terraform import google_scc_organization_custom_module.default {{organization}}/{{name}} +``` diff --git a/website/docs/r/scc_project_custom_module.html.markdown b/website/docs/r/scc_project_custom_module.html.markdown index 0b6ce94fb6c..0da8c7eab74 100644 --- a/website/docs/r/scc_project_custom_module.html.markdown +++ b/website/docs/r/scc_project_custom_module.html.markdown @@ -33,72 +33,62 @@ To get more information about ProjectCustomModule, see: * How-to Guides * [Overview of custom modules for Security Health Analytics](https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview) - ## Example Usage - Scc Project Custom Module Basic ```hcl resource "google_scc_project_custom_module" "example" { - display_name = "basic_custom_module" - enablement_state = "ENABLED" - custom_config { - predicate { - expression = "resource.rotationPeriod > duration(\"2592000s\")" - } - resource_selector { - resource_types = [ - "cloudkms.googleapis.com/CryptoKey", - ] - } - description = "The rotation period of the identified cryptokey resource exceeds 30 days." 
- recommendation = "Set the rotation period to at most 30 days." - severity = "MEDIUM" - } + display_name = "basic_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." + severity = "MEDIUM" + } } ``` - ## Example Usage - Scc Project Custom Module Full ```hcl resource "google_scc_project_custom_module" "example" { - display_name = "full_custom_module" - enablement_state = "ENABLED" - custom_config { - predicate { - expression = "resource.rotationPeriod > duration(\"2592000s\")" - title = "Purpose of the expression" - description = "description of the expression" - location = "location of the expression" - } - custom_output { - properties { - name = "duration" - value_expression { - expression = "resource.rotationPeriod" - title = "Purpose of the expression" - description = "description of the expression" - location = "location of the expression" - } - } - } - resource_selector { - resource_types = [ - "cloudkms.googleapis.com/CryptoKey", - ] - } - severity = "LOW" - description = "Description of the custom module" - recommendation = "Steps to resolve violation" - } + display_name = "full_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { 
+ resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } } ``` diff --git a/website/docs/r/sql_database_instance.html.markdown b/website/docs/r/sql_database_instance.html.markdown index 7cf741ed926..5dd2950c16e 100644 --- a/website/docs/r/sql_database_instance.html.markdown +++ b/website/docs/r/sql_database_instance.html.markdown @@ -471,7 +471,7 @@ to work, cannot be updated, and supports: * `password` - (Optional) Password for the replication connection. -* `sslCipher` - (Optional) Permissible ciphers for use in SSL encryption. +* `ssl_cipher` - (Optional) Permissible ciphers for use in SSL encryption. * `username` - (Optional) Username for replication connection. @@ -486,6 +486,8 @@ The optional `clone` block supports: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". +* `preferred_zone` - (Optional) (Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. If no zone is specified, clone to the same zone as the source instance. [clone-unavailable-instance](https://cloud.google.com/sql/docs/postgres/clone-instance#clone-unavailable-instance) + * `database_names` - (Optional) (SQL Server only, use with `point_in_time`) Clone only the specified databases from the source instance. Clone all databases if empty. * `allocated_ip_range` - (Optional) The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?. 
diff --git a/website/docs/r/storage_transfer_job.html.markdown b/website/docs/r/storage_transfer_job.html.markdown index 4d03338c69b..370e6c3277b 100644 --- a/website/docs/r/storage_transfer_job.html.markdown +++ b/website/docs/r/storage_transfer_job.html.markdown @@ -116,10 +116,12 @@ The following arguments are supported: * `transfer_spec` - (Required) Transfer specification. Structure [documented below](#nested_transfer_spec). -* `schedule` - (Required) Schedule specification defining when the Transfer Job should be scheduled to start, end and what time to run. Structure [documented below](#nested_schedule). - - - - +* `schedule` - (Optional) Schedule specification defining when the Transfer Job should be scheduled to start, end and what time to run. Structure [documented below](#nested_schedule). Either `schedule` or `event_stream` must be set. + +* `event_stream` - (Optional) Specifies the Event-driven transfer options. Event-driven transfers listen to an event stream to transfer updated files. Structure [documented below](#nested_event_stream) Either `event_stream` or `schedule` must be set. + * `project` - (Optional) The project in which the resource belongs. If it is not provided, the provider project is used. @@ -161,6 +163,14 @@ The following arguments are supported: * `repeat_interval` - (Optional) Interval between the start of each scheduled transfer. If unspecified, the default value is 24 hours. This value may not be less than 1 hour. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". +The `event_stream` block supports: + +* `name` - (Required) Specifies a unique name of the resource such as AWS SQS ARN in the form 'arn:aws:sqs:region:account_id:queue_name', or Pub/Sub subscription resource name in the form 'projects/{project}/subscriptions/{sub}'. + +* `event_stream_start_time` - (Optional) Specifies the date and time that Storage Transfer Service starts listening for events from this stream. 
If no start time is specified or start time is in the past, Storage Transfer Service starts listening immediately. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
+
+* `event_stream_expiration_time` - (Optional) Specifies the date and time at which Storage Transfer Service stops listening for events from this stream. After this time, any transfers in progress will complete, but no new transfers are initiated. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
+
 The `object_conditions` block supports:
 
 * `max_time_elapsed_since_last_modification` - (Optional) A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".
diff --git a/website/docs/r/vertex_ai_index.html.markdown b/website/docs/r/vertex_ai_index.html.markdown
index 3f646408877..dddaa3535ed 100644
--- a/website/docs/r/vertex_ai_index.html.markdown
+++ b/website/docs/r/vertex_ai_index.html.markdown
@@ -161,7 +161,7 @@ The following arguments are supported:
 The `metadata` block supports:
 
 * `contents_delta_uri` -
-  (Optional)
+  (Required)
   Allows inserting, updating or deleting the contents of the Matching Engine Index.
   The string must be a valid Cloud Storage directory path.
If this field is set when calling IndexService.UpdateIndex, then no other diff --git a/website/docs/r/workstations_workstation_config.html.markdown b/website/docs/r/workstations_workstation_config.html.markdown index d4dfed7e263..d5321de2129 100644 --- a/website/docs/r/workstations_workstation_config.html.markdown +++ b/website/docs/r/workstations_workstation_config.html.markdown @@ -78,6 +78,7 @@ resource "google_workstations_workstation_config" "default" { idle_timeout = "600s" running_timeout = "21600s" + replica_zones = ["us-central1-a", "us-central1-b"] annotations = { label-one = "value-one" } @@ -538,6 +539,15 @@ The following arguments are supported: How long to wait before automatically stopping a workstation after it was started. A value of 0 indicates that workstations using this configuration should never time out from running duration. Must be greater than 0 and less than 24 hours if `encryption_key` is set. Defaults to 12 hours. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". +* `replica_zones` - + (Optional) + Specifies the zones used to replicate the VM and disk resources within the region. If set, exactly two zones within the workstation cluster's region must be specified—for example, `['us-central1-a', 'us-central1-f']`. + If this field is empty, two default zones within the region are used. Immutable after the workstation configuration is created. + +* `enable_audit_agent` - + (Optional) + Whether to enable Linux `auditd` logging on the workstation. When enabled, a service account must also be specified that has `logging.buckets.write` permission on the project. Operating system audit logging is distinct from Cloud Audit Logs. + * `host` - (Optional) Runtime host for a workstation. @@ -583,6 +593,10 @@ The following arguments are supported: (Optional) Email address of the service account that will be used on VM instances used to support this config. 
This service account must have permission to pull the specified container image. If not set, VMs will run without a service account, in which case the image must be publicly accessible. +* `service_account_scopes` - + (Optional) + Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + * `pool_size` - (Optional) Number of instances to pool for faster workstation startup.