diff --git a/.changelog/5607.txt b/.changelog/5607.txt new file mode 100644 index 00000000000..16454ee8211 --- /dev/null +++ b/.changelog/5607.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +apigee: added ip_range field to `google_apigee_instance` +``` diff --git a/google/dcl.go b/google/dcl.go new file mode 100644 index 00000000000..4ec72490b7f --- /dev/null +++ b/google/dcl.go @@ -0,0 +1,20 @@ +package google + +import ( + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" +) + +var ( + // CreateDirective restricts Apply to creating resources for Create + CreateDirective = []dcl.ApplyOption{ + dcl.WithLifecycleParam(dcl.BlockAcquire), + dcl.WithLifecycleParam(dcl.BlockDestruction), + dcl.WithLifecycleParam(dcl.BlockModification), + } + + // UpdateDirective restricts Apply to modifying resources for Update + UpdateDirective = []dcl.ApplyOption{ + dcl.WithLifecycleParam(dcl.BlockCreation), + dcl.WithLifecycleParam(dcl.BlockDestruction), + } +) diff --git a/google/dcl_logger.go b/google/dcl_logger.go new file mode 100644 index 00000000000..6fb1ba75482 --- /dev/null +++ b/google/dcl_logger.go @@ -0,0 +1,38 @@ +package google + +import ( + "fmt" + "log" +) + +type dclLogger struct{} + +// Fatal records Fatal errors. +func (l dclLogger) Fatal(args ...interface{}) { + log.Fatal(args...) +} + +// Fatalf records Fatal errors with added arguments. +func (l dclLogger) Fatalf(format string, args ...interface{}) { + log.Fatalf(fmt.Sprintf("[DEBUG][DCL FATAL] %s", format), args...) +} + +// Info records Info errors. +func (l dclLogger) Info(args ...interface{}) { + log.Print(args...) +} + +// Infof records Info errors with added arguments. +func (l dclLogger) Infof(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL INFO] %s", format), args...) +} + +// Warningf records Warning errors with added arguments. +func (l dclLogger) Warningf(format string, args ...interface{}) { + log.Printf(fmt.Sprintf("[DEBUG][DCL WARNING] %s", format), args...) +} + +// Warning records Warning errors. +func (l dclLogger) Warning(args ...interface{}) { + log.Print(args...) +} diff --git a/google/expanders.go b/google/expanders.go new file mode 100644 index 00000000000..43b113a8f48 --- /dev/null +++ b/google/expanders.go @@ -0,0 +1,65 @@ +package google + +import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +func expandStringArray(v interface{}) []string { + arr, ok := v.([]string) + + if ok { + return arr + } + + if arr, ok := v.(*schema.Set); ok { + return convertStringSet(arr) + } + + arr = convertStringArr(v.([]interface{})) + if arr == nil { + // Send empty array specifically instead of nil + return make([]string, 0) + } + return arr +} + +func expandIntegerArray(v interface{}) []int64 { + arr, ok := v.([]int64) + + if ok { + return arr + } + + if arr, ok := v.(*schema.Set); ok { + return convertIntegerSet(arr) + } + + return convertIntegerArr(v.([]interface{})) +} + +func convertIntegerSet(v *schema.Set) []int64 { + return convertIntegerArr(v.List()) +} + +func convertIntegerArr(v []interface{}) []int64 { + var vi []int64 + for _, vs := range v { + vi = append(vi, int64(vs.(int))) + } + return vi +} + +// Returns the DCL representation of a three-state boolean value represented by a string in terraform. 
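+// "TRUE" maps to a pointer to true, "FALSE" to a pointer to false, and any
+// other value (including the schema zero value "") maps to nil, i.e. unset.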
+func expandEnumBool(v interface{}) *bool {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	switch s {
+	case "TRUE":
+		b := true
+		return &b
+	case "FALSE":
+		b := false
+		return &b
+	}
+	return nil
+}
diff --git a/google/flatteners.go b/google/flatteners.go
new file mode 100644
index 00000000000..707d823b112
--- /dev/null
+++ b/google/flatteners.go
@@ -0,0 +1,13 @@
+package google
+
+// Returns the terraform representation of a three-state boolean value represented by a pointer to bool in DCL.
+func flattenEnumBool(v interface{}) string {
+	b, ok := v.(*bool)
+	if !ok || b == nil {
+		return ""
+	}
+	if *b {
+		return "TRUE"
+	}
+	return "FALSE"
+}
diff --git a/google/orgpolicy_utils.go b/google/orgpolicy_utils.go
new file mode 100644
index 00000000000..56c680456f0
--- /dev/null
+++ b/google/orgpolicy_utils.go
@@ -0,0 +1,28 @@
+package google
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+// OrgPolicyPolicy has a custom import method because the parent field needs to allow an additional forward slash
+// to represent the type of parent (e.g. projects/{project_id}).
+func resourceOrgPolicyPolicyCustomImport(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	if err := parseImportId([]string{
+		"^(?P<parent>[^/]+/?[^/]*)/policies/(?P<name>[^/]+)",
+		"^(?P<parent>[^/]+/?[^/]*)/(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return err
+	}
+
+	// Replace the import id with the resource id
+	id, err := replaceVarsRecursive(d, config, "{{parent}}/policies/{{name}}", false, 0)
+	if err != nil {
+		return fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return nil
+}
diff --git a/google/provider_dcl_client_creation.go b/google/provider_dcl_client_creation.go
new file mode 100644
index 00000000000..0b61707edf6
--- /dev/null
+++ b/google/provider_dcl_client_creation.go
@@ -0,0 +1,334 @@
+// ----------------------------------------------------------------------------
+//
+// *** AUTO GENERATED CODE *** Type: DCL ***
+//
+// ----------------------------------------------------------------------------
+//
+// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules)
+// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library).
+// Changes will need to be made to the DCL or Magic Modules instead of here.
+//
+// We are not currently able to accept contributions to this file.
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + "time" + + assuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads" + cloudbuild "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild" + cloudresourcemanager "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + dataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + orgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" + osconfig "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig" + privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" +) + +func NewDCLAssuredWorkloadsClient(config *Config, userAgent, billingProject string, timeout time.Duration) *assuredworkloads.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.AssuredWorkloadsBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return assuredworkloads.NewClient(dclConfig) +} + +func NewDCLCloudbuildClient(config *Config, userAgent, billingProject string, timeout time.Duration) *cloudbuild.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.CloudBuildWorkerPoolBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
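+	// A sketch of the calling pattern (values illustrative): each generated
+	// CRUD function builds a short-lived client scoped to its operation's
+	// timeout, e.g.
+	//   client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate))
+	//   res, err := client.ApplyWorkerPool(context.Background(), obj, CreateDirective...)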
+ return cloudbuild.NewClient(dclConfig) +} + +func NewDCLCloudResourceManagerClient(config *Config, userAgent, billingProject string, timeout time.Duration) *cloudresourcemanager.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.CloudResourceManagerBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return cloudresourcemanager.NewClient(dclConfig) +} + +func NewDCLComputeClient(config *Config, userAgent, billingProject string, timeout time.Duration) *compute.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.ComputeBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return compute.NewClient(dclConfig) +} + +func NewDCLContainerAwsClient(config *Config, userAgent, billingProject string, timeout time.Duration) *containeraws.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.ContainerAwsBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return containeraws.NewClient(dclConfig) +} + +func NewDCLContainerAzureClient(config *Config, userAgent, billingProject string, timeout time.Duration) *containerazure.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.ContainerAzureBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return containerazure.NewClient(dclConfig) +} + +func NewDCLDataprocClient(config *Config, userAgent, billingProject string, timeout time.Duration) *dataproc.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.DataprocBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return dataproc.NewClient(dclConfig) +} + +func NewDCLEventarcClient(config *Config, userAgent, billingProject string, timeout time.Duration) *eventarc.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.EventarcBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return eventarc.NewClient(dclConfig) +} + +func NewDCLNetworkConnectivityClient(config *Config, userAgent, billingProject string, timeout time.Duration) *networkconnectivity.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.NetworkConnectivityBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return networkconnectivity.NewClient(dclConfig) +} + +func NewDCLOrgPolicyClient(config *Config, userAgent, billingProject string, timeout time.Duration) *orgpolicy.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.OrgPolicyBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) 
+ return orgpolicy.NewClient(dclConfig) +} + +func NewDCLOsConfigClient(config *Config, userAgent, billingProject string, timeout time.Duration) *osconfig.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.OSConfigBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return osconfig.NewClient(dclConfig) +} + +func NewDCLPrivatecaClient(config *Config, userAgent, billingProject string, timeout time.Duration) *privateca.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.PrivatecaBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return privateca.NewClient(dclConfig) +} + +func NewDCLRecaptchaEnterpriseClient(config *Config, userAgent, billingProject string, timeout time.Duration) *recaptchaenterprise.Client { + configOptions := []dcl.ConfigOption{ + dcl.WithHTTPClient(config.client), + dcl.WithUserAgent(userAgent), + dcl.WithLogger(dclLogger{}), + dcl.WithBasePath(config.RecaptchaEnterpriseBasePath), + } + + if timeout != 0 { + configOptions = append(configOptions, dcl.WithTimeout(timeout)) + } + + if config.UserProjectOverride { + configOptions = append(configOptions, dcl.WithUserProjectOverride()) + if billingProject != "" { + configOptions = append(configOptions, dcl.WithBillingProject(billingProject)) + } + } + + dclConfig := dcl.NewConfig(configOptions...) + return recaptchaenterprise.NewClient(dclConfig) +} diff --git a/google/provider_dcl_endpoints.go b/google/provider_dcl_endpoints.go new file mode 100644 index 00000000000..2fef6c6294e --- /dev/null +++ b/google/provider_dcl_endpoints.go @@ -0,0 +1,173 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// empty string is passed for dcl default since dcl +// [hardcodes the values](https://github.com/GoogleCloudPlatform/declarative-resource-client-library/blob/main/services/google/eventarc/beta/trigger_internal.go#L96-L103) + +var AssuredWorkloadsEndpointEntryKey = "assured_workloads_custom_endpoint" +var AssuredWorkloadsEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_ASSURED_WORKLOADS_CUSTOM_ENDPOINT", + }, ""), +} + +var CloudBuildWorkerPoolEndpointEntryKey = "cloud_build_worker_pool_custom_endpoint" +var CloudBuildWorkerPoolEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CLOUD_BUILD_WORKER_POOL_CUSTOM_ENDPOINT", + }, ""), +} + +var CloudResourceManagerEndpointEntryKey = "cloud_resource_manager_custom_endpoint" +var CloudResourceManagerEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CLOUD_RESOURCE_MANAGER_CUSTOM_ENDPOINT", + }, ""), +} + +var ComputeEndpointEntryKey = "compute_custom_endpoint" +var ComputeEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", + }, ""), +} + +var ContainerAwsEndpointEntryKey = "container_aws_custom_endpoint" +var ContainerAwsEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CONTAINER_AWS_CUSTOM_ENDPOINT", + }, ""), +} + +var ContainerAzureEndpointEntryKey = "container_azure_custom_endpoint" +var ContainerAzureEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CONTAINER_AZURE_CUSTOM_ENDPOINT", + }, ""), +} + +var EventarcEndpointEntryKey = "eventarc_custom_endpoint" +var EventarcEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_EVENTARC_CUSTOM_ENDPOINT", + }, ""), +} + +var NetworkConnectivityEndpointEntryKey = "network_connectivity_custom_endpoint" +var NetworkConnectivityEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_NETWORK_CONNECTIVITY_CUSTOM_ENDPOINT", + }, ""), +} + +var OrgPolicyEndpointEntryKey = "org_policy_custom_endpoint" +var OrgPolicyEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_ORG_POLICY_CUSTOM_ENDPOINT", + }, ""), +} + +var OSConfigEndpointEntryKey = "os_config_custom_endpoint" +var OSConfigEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", + }, ""), +} + +var PrivatecaEndpointEntryKey = "privateca_custom_endpoint" +var PrivatecaEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", + }, ""), +} 
+ +var RecaptchaEnterpriseEndpointEntryKey = "recaptcha_enterprise_custom_endpoint" +var RecaptchaEnterpriseEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_RECAPTCHA_ENTERPRISE_CUSTOM_ENDPOINT", + }, ""), +} + +//Add new values to config.go.erb config object declaration +//AssuredWorkloadsBasePath string +//CloudBuildWorkerPoolBasePath string +//CloudResourceManagerBasePath string +//ComputeBasePath string +//ContainerAwsBasePath string +//ContainerAzureBasePath string +//EventarcBasePath string +//NetworkConnectivityBasePath string +//OrgPolicyBasePath string +//OSConfigBasePath string +//PrivatecaBasePath string +//RecaptchaEnterpriseBasePath string + +//Add new values to provider.go.erb schema initialization +// AssuredWorkloadsEndpointEntryKey: AssuredWorkloadsEndpointEntry, +// CloudBuildWorkerPoolEndpointEntryKey: CloudBuildWorkerPoolEndpointEntry, +// CloudResourceManagerEndpointEntryKey: CloudResourceManagerEndpointEntry, +// ComputeEndpointEntryKey: ComputeEndpointEntry, +// ContainerAwsEndpointEntryKey: ContainerAwsEndpointEntry, +// ContainerAzureEndpointEntryKey: ContainerAzureEndpointEntry, +// EventarcEndpointEntryKey: EventarcEndpointEntry, +// NetworkConnectivityEndpointEntryKey: NetworkConnectivityEndpointEntry, +// OrgPolicyEndpointEntryKey: OrgPolicyEndpointEntry, +// OSConfigEndpointEntryKey: OSConfigEndpointEntry, +// PrivatecaEndpointEntryKey: PrivatecaEndpointEntry, +// RecaptchaEnterpriseEndpointEntryKey: RecaptchaEnterpriseEndpointEntry, + +//Add new values to provider.go.erb - provider block read +// config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) +// config.CloudBuildWorkerPoolBasePath = d.Get(CloudBuildWorkerPoolEndpointEntryKey).(string) +// config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) +// config.ComputeBasePath = d.Get(ComputeEndpointEntryKey).(string) +// config.ContainerAwsBasePath = d.Get(ContainerAwsEndpointEntryKey).(string) +// config.ContainerAzureBasePath = d.Get(ContainerAzureEndpointEntryKey).(string) +// config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) +// config.NetworkConnectivityBasePath = d.Get(NetworkConnectivityEndpointEntryKey).(string) +// config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) +// config.OSConfigBasePath = d.Get(OSConfigEndpointEntryKey).(string) +// config.PrivatecaBasePath = d.Get(PrivatecaEndpointEntryKey).(string) +// config.RecaptchaEnterpriseBasePath = d.Get(RecaptchaEnterpriseEndpointEntryKey).(string) diff --git a/google/resource_apigee_instance.go b/google/resource_apigee_instance.go index 4ad38ce501e..2c6c385d828 100644 --- a/google/resource_apigee_instance.go +++ b/google/resource_apigee_instance.go @@ -80,6 +80,18 @@ Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/c ForceNew: true, Description: `Display name of the instance.`, }, + "ip_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `IP range represents the customer-provided CIDR block of length 22 that will be used for +the Apigee instance creation. This optional range, if provided, should be freely +available as part of larger named range the customer has allocated to the Service +Networking peering. If this is not provided, Apigee will automatically request for any +available /22 CIDR block from Service Networking. 
The customer should use this CIDR block +for configuring their firewall needs to allow traffic from Apigee. +Input format: "a.b.c.d/22"`, + }, "peering_cidr_range": { Type: schema.TypeString, Optional: true, @@ -128,6 +140,12 @@ func resourceApigeeInstanceCreate(d *schema.ResourceData, meta interface{}) erro } else if v, ok := d.GetOkExists("peering_cidr_range"); !isEmptyValue(reflect.ValueOf(peeringCidrRangeProp)) && (ok || !reflect.DeepEqual(v, peeringCidrRangeProp)) { obj["peeringCidrRange"] = peeringCidrRangeProp } + ipRangeProp, err := expandApigeeInstanceIpRange(d.Get("ip_range"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_range"); !isEmptyValue(reflect.ValueOf(ipRangeProp)) && (ok || !reflect.DeepEqual(v, ipRangeProp)) { + obj["ipRange"] = ipRangeProp + } descriptionProp, err := expandApigeeInstanceDescription(d.Get("description"), d, config) if err != nil { return err @@ -233,6 +251,9 @@ func resourceApigeeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("peering_cidr_range", flattenApigeeInstancePeeringCidrRange(res["peeringCidrRange"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } + if err := d.Set("ip_range", flattenApigeeInstanceIpRange(res["ipRange"], d, config)); err != nil { + return fmt.Errorf("Error reading Instance: %s", err) + } if err := d.Set("description", flattenApigeeInstanceDescription(res["description"], d, config)); err != nil { return fmt.Errorf("Error reading Instance: %s", err) } @@ -348,6 +369,10 @@ func flattenApigeeInstancePeeringCidrRange(v interface{}, d *schema.ResourceData return v } +func flattenApigeeInstanceIpRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + func flattenApigeeInstanceDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { return v } @@ -380,6 +405,10 @@ func expandApigeeInstancePeeringCidrRange(v interface{}, d TerraformResourceData return v, nil } +func expandApigeeInstanceIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + func expandApigeeInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/google/resource_apigee_instance_generated_test.go b/google/resource_apigee_instance_generated_test.go index 77c1673dece..38463536ef0 100644 --- a/google/resource_apigee_instance_generated_test.go +++ b/google/resource_apigee_instance_generated_test.go @@ -208,6 +208,99 @@ resource "google_apigee_instance" "apigee_instance" { `, context) } +func TestAccApigeeInstance_apigeeInstanceIpRangeTestExample(t *testing.T) { + skipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "org_id": getTestOrgFromEnv(t), + "billing_account": getTestBillingAccountFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckApigeeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApigeeInstance_apigeeInstanceIpRangeTestExample(context), + }, + { + ResourceName: "google_apigee_instance.apigee_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"org_id"}, + }, + }, + }) +} + +func testAccApigeeInstance_apigeeInstanceIpRangeTestExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_project" "project" { + project_id = 
"tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project_service" "apigee" { + project = google_project.project.project_id + service = "apigee.googleapis.com" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" +} + +resource "google_project_service" "servicenetworking" { + project = google_project.project.project_id + service = "servicenetworking.googleapis.com" +} + +resource "google_compute_network" "apigee_network" { + name = "apigee-network" + project = google_project.project.project_id + depends_on = [google_project_service.compute] +} + +resource "google_compute_global_address" "apigee_range" { + name = "apigee-range" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 22 + network = google_compute_network.apigee_network.id + project = google_project.project.project_id +} + +resource "google_service_networking_connection" "apigee_vpc_connection" { + network = google_compute_network.apigee_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.apigee_range.name] + depends_on = [google_project_service.servicenetworking] +} + +resource "google_apigee_organization" "apigee_org" { + analytics_region = "us-central1" + project_id = google_project.project.project_id + authorized_network = google_compute_network.apigee_network.id + depends_on = [ + google_service_networking_connection.apigee_vpc_connection, + google_project_service.apigee, + ] +} + +resource "google_apigee_instance" "apigee_instance" { + name = "tf-test%{random_suffix}" + location = "us-central1" + org_id = google_apigee_organization.apigee_org.id + ip_range = "10.87.8.0/22" +} +`, context) +} + func testAccCheckApigeeInstanceDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/google/resource_assured_workloads_workload.go b/google/resource_assured_workloads_workload.go new file mode 100644 index 00000000000..b730a777684 --- /dev/null +++ b/google/resource_assured_workloads_workload.go @@ -0,0 +1,566 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes
+// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose
+//
+// ----------------------------------------------------------------------------
+
+package google
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+	dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
+	assuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads"
+)
+
+func resourceAssuredWorkloadsWorkload() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAssuredWorkloadsWorkloadCreate,
+		Read:   resourceAssuredWorkloadsWorkloadRead,
+		Update: resourceAssuredWorkloadsWorkloadUpdate,
+		Delete: resourceAssuredWorkloadsWorkloadDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: resourceAssuredWorkloadsWorkloadImport,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(10 * time.Minute),
+			Update: schema.DefaultTimeout(10 * time.Minute),
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"billing_account": {
+				Type:             schema.TypeString,
+				Required:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: compareSelfLinkOrResourceName,
+				Description:      "Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.",
+			},
+
+			"compliance_regime": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS",
+			},
+
+			"display_name": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Required. The user-assigned display name of the Workload. When present it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload",
+			},
+
+			"location": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The location for the resource",
+			},
+
+			"organization": {
+				Type:             schema.TypeString,
+				Required:         true,
+				ForceNew:         true,
+				DiffSuppressFunc: compareSelfLinkOrResourceName,
+				Description:      "The organization for the resource",
+			},
+
+			"kms_settings": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is mandatory for a subset of Compliance Regimes.",
+				MaxItems:    1,
+				Elem:        AssuredWorkloadsWorkloadKmsSettingsSchema(),
+			},
+
+			"labels": {
+				Type:        schema.TypeMap,
+				Optional:    true,
+				Description: "Optional. Labels applied to the workload.",
+				Elem:        &schema.Schema{Type: schema.TypeString},
+			},
+
+			"provisioned_resources_parent": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Input only. The parent resource for the resources managed by this Assured Workload. May be either an organization or a folder. Must be the same or a child of the Workload parent. If not specified, all resources are created under the Workload parent. Formats: folders/{folder_id}, organizations/{organization_id}",
+			},
+
+			"resource_settings": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional.",
+				Elem:        AssuredWorkloadsWorkloadResourceSettingsSchema(),
+			},
+
+			"create_time": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Output only. Immutable. The Workload creation timestamp.",
+			},
+
+			"name": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Output only. The resource name of the workload.",
+			},
+
+			"resources": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only.",
+				Elem:        AssuredWorkloadsWorkloadResourcesSchema(),
+			},
+		},
+	}
+}
+
+func AssuredWorkloadsWorkloadKmsSettingsSchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"next_rotation_time": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.",
+			},
+
+			"rotation_period": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "Required. Input only. Immutable. next_rotation_time will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.",
+			},
+		},
+	}
+}
+
+func AssuredWorkloadsWorkloadResourceSettingsSchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"resource_id": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.",
+			},
+
+			"resource_type": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Indicates the type of resource. This field should be specified to match the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT). Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER",
+			},
+		},
+	}
+}
+
+func AssuredWorkloadsWorkloadResourcesSchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"resource_id": {
+				Type:        schema.TypeInt,
+				Computed:    true,
+				Description: "Resource identifier. For a project this represents project_number.",
+			},
+
+			"resource_type": {
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Indicates the type of resource.
Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", + }, + }, + } +} + +func resourceAssuredWorkloadsWorkloadCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &assuredworkloads.Workload{ + BillingAccount: dcl.String(d.Get("billing_account").(string)), + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Labels: checkStringMap(d.Get("labels")), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + } + + id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkload(context.Background(), obj, createDirective...) 
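+	// Apply is the single declarative entry point; CreateDirective (which
+	// blocks the acquire, destroy, and modify lifecycles) restricts this call
+	// to pure creation, so an already-existing workload surfaces as an error
+	// rather than being adopted.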
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Workload: %s", err) + } + + log.Printf("[DEBUG] Finished creating Workload %q: %#v", d.Id(), res) + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // Id has a server-generated value, set again after creation + id, err = replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceAssuredWorkloadsWorkloadRead(d, meta) +} + +func resourceAssuredWorkloadsWorkloadRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &assuredworkloads.Workload{ + BillingAccount: dcl.String(d.Get("billing_account").(string)), + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Labels: checkStringMap(d.Get("labels")), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkload(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("AssuredWorkloadsWorkload %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("billing_account", res.BillingAccount); err != nil { + return fmt.Errorf("error setting billing_account in state: %s", err) + } + if err = d.Set("compliance_regime", res.ComplianceRegime); err != nil { + return fmt.Errorf("error setting compliance_regime in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("organization", res.Organization); err != nil { + return fmt.Errorf("error setting organization in state: %s", err) + } + if err = d.Set("kms_settings", flattenAssuredWorkloadsWorkloadKmsSettings(res.KmsSettings)); err != nil { + return fmt.Errorf("error setting kms_settings in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("provisioned_resources_parent", res.ProvisionedResourcesParent); err != nil { + return 
fmt.Errorf("error setting provisioned_resources_parent in state: %s", err) + } + if err = d.Set("resource_settings", flattenAssuredWorkloadsWorkloadResourceSettingsArray(res.ResourceSettings)); err != nil { + return fmt.Errorf("error setting resource_settings in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("resources", flattenAssuredWorkloadsWorkloadResourcesArray(res.Resources)); err != nil { + return fmt.Errorf("error setting resources in state: %s", err) + } + + return nil +} +func resourceAssuredWorkloadsWorkloadUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &assuredworkloads.Workload{ + BillingAccount: dcl.String(d.Get("billing_account").(string)), + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), + DisplayName: dcl.String(d.Get("display_name").(string)), + Location: dcl.String(d.Get("location").(string)), + Organization: dcl.String(d.Get("organization").(string)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), + Labels: checkStringMap(d.Get("labels")), + ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + // Construct state hint from old values + old := &assuredworkloads.Workload{ + BillingAccount: dcl.String(oldValue(d.GetChange("billing_account")).(string)), + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(oldValue(d.GetChange("compliance_regime")).(string)), + DisplayName: dcl.String(oldValue(d.GetChange("display_name")).(string)), + Location: dcl.String(oldValue(d.GetChange("location")).(string)), + Organization: dcl.String(oldValue(d.GetChange("organization")).(string)), + KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(oldValue(d.GetChange("kms_settings"))), + Labels: checkStringMap(oldValue(d.GetChange("labels"))), + ProvisionedResourcesParent: dcl.String(oldValue(d.GetChange("provisioned_resources_parent")).(string)), + ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(oldValue(d.GetChange("resource_settings"))), + Name: dcl.StringOrNil(oldValue(d.GetChange("name")).(string)), + } + directive := UpdateDirective + directive = append(directive, dcl.WithStateHint(old)) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkload(context.Background(), obj, directive...) 
+
+	if _, ok := err.(dcl.DiffAfterApplyError); ok {
+		log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err)
+	} else if err != nil {
+		// The resource didn't actually update
+		d.SetId("")
+		return fmt.Errorf("Error updating Workload: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished updating Workload %q: %#v", d.Id(), res)
+
+	return resourceAssuredWorkloadsWorkloadRead(d, meta)
+}
+
+func resourceAssuredWorkloadsWorkloadDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	obj := &assuredworkloads.Workload{
+		BillingAccount:             dcl.String(d.Get("billing_account").(string)),
+		ComplianceRegime:           assuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)),
+		DisplayName:                dcl.String(d.Get("display_name").(string)),
+		Location:                   dcl.String(d.Get("location").(string)),
+		Organization:               dcl.String(d.Get("organization").(string)),
+		KmsSettings:                expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")),
+		Labels:                     checkStringMap(d.Get("labels")),
+		ProvisionedResourcesParent: dcl.String(d.Get("provisioned_resources_parent").(string)),
+		ResourceSettings:           expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")),
+		Name:                       dcl.StringOrNil(d.Get("name").(string)),
+	}
+
+	log.Printf("[DEBUG] Deleting Workload %q", d.Id())
+	userAgent, err := generateUserAgentString(d, config.userAgent)
+	if err != nil {
+		return err
+	}
+	billingProject := ""
+	// err == nil indicates that the billing_project value was found
+	if bp, err := getBillingProject(d, config); err == nil {
+		billingProject = bp
+	}
+	client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete))
+	if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil {
+		d.SetId("")
+		return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
+	} else {
+		client.Config.BasePath = bp
+	}
+	if err := client.DeleteWorkload(context.Background(), obj); err != nil {
+		return fmt.Errorf("Error deleting Workload: %s", err)
+	}
+
+	log.Printf("[DEBUG] Finished deleting Workload %q", d.Id())
+	return nil
+}
+
+func resourceAssuredWorkloadsWorkloadImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*Config)
+	if err := parseImportId([]string{
+		"organizations/(?P<organization>[^/]+)/locations/(?P<location>[^/]+)/workloads/(?P<name>[^/]+)",
+		"(?P<organization>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace the import id with the resource id
+	id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func expandAssuredWorkloadsWorkloadKmsSettings(o interface{}) *assuredworkloads.WorkloadKmsSettings {
+	if o == nil {
+		return assuredworkloads.EmptyWorkloadKmsSettings
+	}
+	objArr := o.([]interface{})
+	if len(objArr) == 0 {
+		return assuredworkloads.EmptyWorkloadKmsSettings
+	}
+	obj := objArr[0].(map[string]interface{})
+	return &assuredworkloads.WorkloadKmsSettings{
+		NextRotationTime: dcl.String(obj["next_rotation_time"].(string)),
+		RotationPeriod:   dcl.String(obj["rotation_period"].(string)),
+	}
+}
+
+func flattenAssuredWorkloadsWorkloadKmsSettings(obj *assuredworkloads.WorkloadKmsSettings) interface{} {
+	if obj == nil || obj.Empty() {
+		return nil
+	}
+	transformed := map[string]interface{}{
+		"next_rotation_time": obj.NextRotationTime,
"rotation_period": obj.RotationPeriod, + } + + return []interface{}{transformed} + +} +func expandAssuredWorkloadsWorkloadResourceSettingsArray(o interface{}) []assuredworkloads.WorkloadResourceSettings { + if o == nil { + return make([]assuredworkloads.WorkloadResourceSettings, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]assuredworkloads.WorkloadResourceSettings, 0) + } + + items := make([]assuredworkloads.WorkloadResourceSettings, 0, len(objs)) + for _, item := range objs { + i := expandAssuredWorkloadsWorkloadResourceSettings(item) + items = append(items, *i) + } + + return items +} + +func expandAssuredWorkloadsWorkloadResourceSettings(o interface{}) *assuredworkloads.WorkloadResourceSettings { + if o == nil { + return assuredworkloads.EmptyWorkloadResourceSettings + } + + obj := o.(map[string]interface{}) + return &assuredworkloads.WorkloadResourceSettings{ + ResourceId: dcl.String(obj["resource_id"].(string)), + ResourceType: assuredworkloads.WorkloadResourceSettingsResourceTypeEnumRef(obj["resource_type"].(string)), + } +} + +func flattenAssuredWorkloadsWorkloadResourceSettingsArray(objs []assuredworkloads.WorkloadResourceSettings) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenAssuredWorkloadsWorkloadResourceSettings(&item) + items = append(items, i) + } + + return items +} + +func flattenAssuredWorkloadsWorkloadResourceSettings(obj *assuredworkloads.WorkloadResourceSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_id": obj.ResourceId, + "resource_type": obj.ResourceType, + } + + return transformed + +} + +func flattenAssuredWorkloadsWorkloadResourcesArray(objs []assuredworkloads.WorkloadResources) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenAssuredWorkloadsWorkloadResources(&item) + items = append(items, i) + } + + return items +} + +func flattenAssuredWorkloadsWorkloadResources(obj *assuredworkloads.WorkloadResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_id": obj.ResourceId, + "resource_type": obj.ResourceType, + } + + return transformed + +} diff --git a/google/resource_assured_workloads_workload_generated_test.go b/google/resource_assured_workloads_workload_generated_test.go new file mode 100644 index 00000000000..2c9a8a71e28 --- /dev/null +++ b/google/resource_assured_workloads_workload_generated_test.go @@ -0,0 +1,194 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + assuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccAssuredWorkloadsWorkload_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": getTestBillingAccountFromEnv(t), + "org_id": getTestOrgFromEnv(t), + "region": getTestRegionFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_BasicHandWritten(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "provisioned_resources_parent"}, + }, + { + Config: testAccAssuredWorkloadsWorkload_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "provisioned_resources_parent"}, + }, + }, + }) +} +func TestAccAssuredWorkloadsWorkload_FullHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_acct": getTestBillingAccountFromEnv(t), + "org_id": getTestOrgFromEnv(t), + "region": getTestRegionFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAssuredWorkloadsWorkload_FullHandWritten(context), + }, + { + ResourceName: "google_assured_workloads_workload.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"billing_account", "kms_settings", "resource_settings", "provisioned_resources_parent"}, + }, + }, + }) +} + +func testAccAssuredWorkloadsWorkload_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + labels = { + a = "a" + } + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + provisioned_resources_parent = google_folder.folder1.name + organization = "%{org_id}" + location = "us-central1" +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" +} +`, context) +} + +func testAccAssuredWorkloadsWorkload_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + labels = { + a = "b" + } + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = 
"FEDRAMP_MODERATE" + provisioned_resources_parent = google_folder.folder1.name + organization = "%{org_id}" + location = "us-central1" +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" +} +`, context) +} + +func testAccAssuredWorkloadsWorkload_FullHandWritten(context map[string]interface{}) string { + return Nprintf(` +resource "google_assured_workloads_workload" "primary" { + display_name = "tf-test-name%{random_suffix}" + billing_account = "billingAccounts/%{billing_acct}" + compliance_regime = "FEDRAMP_MODERATE" + organization = "%{org_id}" + location = "us-central1" + kms_settings { + next_rotation_time = "2022-10-02T15:01:23Z" + rotation_period = "864000s" + } + provisioned_resources_parent = google_folder.folder1.name +} + +resource "google_folder" "folder1" { + display_name = "tf-test-name%{random_suffix}" + parent = "organizations/%{org_id}" +} + +`, context) +} + +func testAccCheckAssuredWorkloadsWorkloadDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_assured_workloads_workload" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &assuredworkloads.Workload{ + BillingAccount: dcl.String(rs.Primary.Attributes["billing_account"]), + ComplianceRegime: assuredworkloads.WorkloadComplianceRegimeEnumRef(rs.Primary.Attributes["compliance_regime"]), + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Organization: dcl.String(rs.Primary.Attributes["organization"]), + ProvisionedResourcesParent: dcl.String(rs.Primary.Attributes["provisioned_resources_parent"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := NewDCLAssuredWorkloadsClient(config, config.userAgent, billingProject, 0) + _, err := client.GetWorkload(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_assured_workloads_workload still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_cloudbuild_worker_pool.go b/google/resource_cloudbuild_worker_pool.go new file mode 100644 index 00000000000..4605b64df16 --- /dev/null +++ b/google/resource_cloudbuild_worker_pool.go @@ -0,0 +1,470 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + cloudbuild "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild" +) + +func resourceCloudbuildWorkerPool() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudbuildWorkerPoolCreate, + Read: resourceCloudbuildWorkerPoolRead, + Update: resourceCloudbuildWorkerPoolUpdate, + Delete: resourceCloudbuildWorkerPoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudbuildWorkerPoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "User-defined name of the `WorkerPool`.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "User specified annotations. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "A user-specified, human-readable name for the `WorkerPool`. If provided, this value must be 1-63 characters.", + }, + + "network_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Network configuration for the `WorkerPool`.", + MaxItems: 1, + Elem: CloudbuildWorkerPoolNetworkConfigSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: "Configuration to be used for creating workers in the `WorkerPool`.", + MaxItems: 1, + Elem: CloudbuildWorkerPoolWorkerConfigSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the request to create the `WorkerPool` was received.", + }, + + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Time at which the request to delete the `WorkerPool` was received.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. `WorkerPool` state. Possible values: STATE_UNSPECIFIED, PENDING, APPROVED, REJECTED, CANCELLED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A unique identifier for the `WorkerPool`.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
Time at which the request to update the `WorkerPool` was received.", + }, + }, + } +} + +func CloudbuildWorkerPoolNetworkConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "peered_network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareResourceNames, + Description: "Required. Immutable. The network definition that the workers are peered to. If this section is left empty, the workers will be peered to `WorkerPool.project_id` on the service producer network. Must be in the format `projects/{project}/global/networks/{network}`, where `{project}` is a project number, such as `12345`, and `{network}` is the name of a VPC network in the project. See [Understanding network configuration options](https://cloud.google.com/cloud-build/docs/custom-workers/set-up-custom-worker-pool-environment#understanding_the_network_configuration_options)", + }, + }, + } +} + +func CloudbuildWorkerPoolWorkerConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Description: "Size of the disk attached to the worker, in GB. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: "Machine type of a worker, such as `n1-standard-1`. See [Worker pool config file](https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`.", + }, + + "no_external_ip": { + Type: schema.TypeBool, + Optional: true, + Description: "If true, workers are created without any public address, which prevents network egress to public IPs.", + }, + }, + } +} + +func resourceCloudbuildWorkerPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuild.WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: checkStringMap(d.Get("annotations")), + DisplayName: dcl.String(d.Get("display_name").(string)), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkerPool(context.Background(), obj, createDirective...) 
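+	// The DCL exposes a single declarative Apply per resource rather than
+	// separate create and update RPCs; `createDirective` restricts this call
+	// to creation only. A dcl.DiffAfterApplyError appears to indicate that
+	// the apply itself succeeded but a post-apply read still differed from
+	// the desired state, which is why the branch below logs it at DEBUG
+	// level instead of failing the create.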
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkerPool %q: %#v", d.Id(), res) + + return resourceCloudbuildWorkerPoolRead(d, meta) +} + +func resourceCloudbuildWorkerPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuild.WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: checkStringMap(d.Get("annotations")), + DisplayName: dcl.String(d.Get("display_name").(string)), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkerPool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("CloudbuildWorkerPool %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("network_config", flattenCloudbuildWorkerPoolNetworkConfig(res.NetworkConfig)); err != nil { + return fmt.Errorf("error setting network_config in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("worker_config", flattenCloudbuildWorkerPoolWorkerConfig(res.WorkerConfig)); err != nil { + return fmt.Errorf("error setting worker_config in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("delete_time", res.DeleteTime); err != nil { + return fmt.Errorf("error setting delete_time in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceCloudbuildWorkerPoolUpdate(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuild.WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: checkStringMap(d.Get("annotations")), + DisplayName: dcl.String(d.Get("display_name").(string)), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkerPool(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished updating WorkerPool %q: %#v", d.Id(), res) + + return resourceCloudbuildWorkerPoolRead(d, meta) +} + +func resourceCloudbuildWorkerPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &cloudbuild.WorkerPool{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Annotations: checkStringMap(d.Get("annotations")), + DisplayName: dcl.String(d.Get("display_name").(string)), + NetworkConfig: expandCloudbuildWorkerPoolNetworkConfig(d.Get("network_config")), + Project: dcl.String(project), + WorkerConfig: expandCloudbuildWorkerPoolWorkerConfig(d.Get("worker_config")), + } + + log.Printf("[DEBUG] Deleting WorkerPool %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLCloudbuildClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkerPool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting WorkerPool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting WorkerPool %q", d.Id()) + return nil +} + +func resourceCloudbuildWorkerPoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/workerPools/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the 
resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workerPools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandCloudbuildWorkerPoolNetworkConfig(o interface{}) *cloudbuild.WorkerPoolNetworkConfig { + if o == nil { + return cloudbuild.EmptyWorkerPoolNetworkConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return cloudbuild.EmptyWorkerPoolNetworkConfig + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuild.WorkerPoolNetworkConfig{ + PeeredNetwork: dcl.String(obj["peered_network"].(string)), + } +} + +func flattenCloudbuildWorkerPoolNetworkConfig(obj *cloudbuild.WorkerPoolNetworkConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "peered_network": obj.PeeredNetwork, + } + + return []interface{}{transformed} + +} + +func expandCloudbuildWorkerPoolWorkerConfig(o interface{}) *cloudbuild.WorkerPoolWorkerConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &cloudbuild.WorkerPoolWorkerConfig{ + DiskSizeGb: dcl.Int64(int64(obj["disk_size_gb"].(int))), + MachineType: dcl.String(obj["machine_type"].(string)), + NoExternalIP: dcl.Bool(obj["no_external_ip"].(bool)), + } +} + +func flattenCloudbuildWorkerPoolWorkerConfig(obj *cloudbuild.WorkerPoolWorkerConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disk_size_gb": obj.DiskSizeGb, + "machine_type": obj.MachineType, + "no_external_ip": obj.NoExternalIP, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_cloudbuild_worker_pool_sweeper_test.go b/google/resource_cloudbuild_worker_pool_sweeper_test.go new file mode 100644 index 00000000000..d6d88871227 --- /dev/null +++ b/google/resource_cloudbuild_worker_pool_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + cloudbuild "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudbuild" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("CloudbuildWorkerPool", &resource.Sweeper{ + Name: "CloudbuildWorkerPool", + F: testSweepCloudbuildWorkerPool, + }) +} + +func testSweepCloudbuildWorkerPool(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for CloudbuildWorkerPool") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLCloudbuildClient(config, config.userAgent, "", 0) + err = client.DeleteAllWorkerPool(context.Background(), d["project"], d["location"], isDeletableCloudbuildWorkerPool) + if err != nil { + return err + } + return nil +} + +func isDeletableCloudbuildWorkerPool(r *cloudbuild.WorkerPool) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_compute_firewall_policy.go b/google/resource_compute_firewall_policy.go new file mode 100644 index 00000000000..f9b0f3f8130 --- /dev/null +++ b/google/resource_compute_firewall_policy.go @@ -0,0 +1,329 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" +) + +func resourceComputeFirewallPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallPolicyCreate, + Read: resourceComputeFirewallPolicyRead, + Update: resourceComputeFirewallPolicyUpdate, + Delete: resourceComputeFirewallPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeFirewallPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The parent of the firewall policy.", + }, + + "short_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "An optional description of this resource. Provide this property when you create the resource.", + }, + + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: "Creation timestamp in RFC3339 text format.", + }, + + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: "Fingerprint of the resource. This field is used internally during updates of this resource.", + }, + + "firewall_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "The unique identifier for the resource. This identifier is defined by the server.", + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy.", + }, + + "rule_tuple_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Total count of all firewall policy rule tuples. 
A firewall policy can not exceed a set number of tuples.", + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: "Server-defined URL for the resource.", + }, + + "self_link_with_id": { + Type: schema.TypeString, + Computed: true, + Description: "Server-defined URL for this resource with the resource id.", + }, + }, + } +} + +func resourceComputeFirewallPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicy{ + Parent: dcl.String(d.Get("parent").(string)), + ShortName: dcl.String(d.Get("short_name").(string)), + Description: dcl.String(d.Get("description").(string)), + } + + id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicy(context.Background(), obj, createDirective...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating FirewallPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished creating FirewallPolicy %q: %#v", d.Id(), res) + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // Id has a server-generated value, set again after creation + id, err = replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceComputeFirewallPolicyRead(d, meta) +} + +func resourceComputeFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicy{ + Parent: dcl.String(d.Get("parent").(string)), + ShortName: dcl.String(d.Get("short_name").(string)), + Description: dcl.String(d.Get("description").(string)), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetFirewallPolicy(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ComputeFirewallPolicy %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("parent", res.Parent); err != nil { + return 
fmt.Errorf("error setting parent in state: %s", err) + } + if err = d.Set("short_name", res.ShortName); err != nil { + return fmt.Errorf("error setting short_name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("creation_timestamp", res.CreationTimestamp); err != nil { + return fmt.Errorf("error setting creation_timestamp in state: %s", err) + } + if err = d.Set("fingerprint", res.Fingerprint); err != nil { + return fmt.Errorf("error setting fingerprint in state: %s", err) + } + if err = d.Set("firewall_policy_id", res.Id); err != nil { + return fmt.Errorf("error setting firewall_policy_id in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { + return fmt.Errorf("error setting rule_tuple_count in state: %s", err) + } + if err = d.Set("self_link", res.SelfLink); err != nil { + return fmt.Errorf("error setting self_link in state: %s", err) + } + if err = d.Set("self_link_with_id", res.SelfLinkWithId); err != nil { + return fmt.Errorf("error setting self_link_with_id in state: %s", err) + } + + return nil +} +func resourceComputeFirewallPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicy{ + Parent: dcl.String(d.Get("parent").(string)), + ShortName: dcl.String(d.Get("short_name").(string)), + Description: dcl.String(d.Get("description").(string)), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicy(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating FirewallPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished updating FirewallPolicy %q: %#v", d.Id(), res) + + return resourceComputeFirewallPolicyRead(d, meta) +} + +func resourceComputeFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicy{ + Parent: dcl.String(d.Get("parent").(string)), + ShortName: dcl.String(d.Get("short_name").(string)), + Description: dcl.String(d.Get("description").(string)), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting FirewallPolicy %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteFirewallPolicy(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting FirewallPolicy: %s", err) + } + + log.Printf("[DEBUG] Finished deleting FirewallPolicy %q", d.Id()) + return nil +} + +func resourceComputeFirewallPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "locations/global/firewallPolicies/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_firewall_policy_association.go b/google/resource_compute_firewall_policy_association.go new file mode 100644 index 00000000000..ed5c681cf0a --- /dev/null +++ b/google/resource_compute_firewall_policy_association.go @@ -0,0 +1,222 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" +) + +func resourceComputeFirewallPolicyAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallPolicyAssociationCreate, + Read: resourceComputeFirewallPolicyAssociationRead, + Delete: resourceComputeFirewallPolicyAssociationDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeFirewallPolicyAssociationImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "attachment_target": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The target that the firewall policy is attached to.", + }, + + "firewall_policy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The firewall policy ID of the association.", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name for an association.", + }, + + "short_name": { + Type: schema.TypeString, + Computed: true, + Description: "The short name of the firewall policy of the association.", + }, + }, + } +} + +func resourceComputeFirewallPolicyAssociationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyAssociation{ + AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Name: dcl.String(d.Get("name").(string)), + } + + id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicyAssociation(context.Background(), obj, createDirective...) 
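+	// The ID set above comes from replaceVarsForId with the template
+	// "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}".
+	// With hypothetical values firewall_policy = "123456789" and
+	// name = "my-association", the stored ID would be:
+	//
+	//	locations/global/firewallPolicies/123456789/associations/my-association
+	//
+	// which matches the first pattern accepted by the import function at the
+	// bottom of this file.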
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating FirewallPolicyAssociation: %s", err) + } + + log.Printf("[DEBUG] Finished creating FirewallPolicyAssociation %q: %#v", d.Id(), res) + + return resourceComputeFirewallPolicyAssociationRead(d, meta) +} + +func resourceComputeFirewallPolicyAssociationRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyAssociation{ + AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Name: dcl.String(d.Get("name").(string)), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetFirewallPolicyAssociation(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ComputeFirewallPolicyAssociation %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { + return fmt.Errorf("error setting attachment_target in state: %s", err) + } + if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { + return fmt.Errorf("error setting firewall_policy in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("short_name", res.ShortName); err != nil { + return fmt.Errorf("error setting short_name in state: %s", err) + } + + return nil +} + +func resourceComputeFirewallPolicyAssociationDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyAssociation{ + AttachmentTarget: dcl.String(d.Get("attachment_target").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Name: dcl.String(d.Get("name").(string)), + } + + log.Printf("[DEBUG] Deleting FirewallPolicyAssociation %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteFirewallPolicyAssociation(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting FirewallPolicyAssociation: %s", err) + } + + log.Printf("[DEBUG] Finished deleting FirewallPolicyAssociation %q", d.Id()) + return nil +} + +func resourceComputeFirewallPolicyAssociationImport(d *schema.ResourceData, meta 
interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "locations/global/firewallPolicies/(?P<firewall_policy>[^/]+)/associations/(?P<name>[^/]+)", + "(?P<firewall_policy>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_firewall_policy_rule.go b/google/resource_compute_firewall_policy_rule.go new file mode 100644 index 00000000000..1a3e45cbe17 --- /dev/null +++ b/google/resource_compute_firewall_policy_rule.go @@ -0,0 +1,500 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" +) + +func resourceComputeFirewallPolicyRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallPolicyRuleCreate, + Read: resourceComputeFirewallPolicyRuleRead, + Update: resourceComputeFirewallPolicyRuleUpdate, + Delete: resourceComputeFirewallPolicyRuleDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeFirewallPolicyRuleImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Required: true, + Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + }, + + "direction": { + Type: schema.TypeString, + Required: true, + Description: "The direction in which this rule applies. Possible values: INGRESS, EGRESS", + }, + + "firewall_policy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The firewall policy of the resource.", + }, + + "match": { + Type: schema.TypeList, + Required: true, + Description: "A match condition that incoming traffic is evaluated against. 
If it evaluates to true, the corresponding 'action' is enforced.", + MaxItems: 1, + Elem: ComputeFirewallPolicyRuleMatchSchema(), + }, + + "priority": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "An optional description for this resource.", + }, + + "disabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", + }, + + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Description: "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", + }, + + "target_resources": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. If this field is left blank, all VMs within the organization will receive the rule.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "target_service_accounts": { + Type: schema.TypeList, + Optional: true, + Description: "A list of service accounts indicating the sets of instances that are applied with this rule.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "kind": { + Type: schema.TypeString, + Computed: true, + Description: "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", + }, + + "rule_tuple_count": { + Type: schema.TypeInt, + Computed: true, + Description: "Calculation of the complexity of a single firewall policy rule.", + }, + }, + } +} + +func ComputeFirewallPolicyRuleMatchSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "layer4_configs": { + Type: schema.TypeList, + Required: true, + Description: "Pairs of IP protocols and ports that the rule should match.", + Elem: ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema(), + }, + + "dest_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "src_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 256.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_protocol": { + Type: schema.TypeString, + Required: true, + Description: "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. 
This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", + }, + + "ports": { + Type: schema.TypeList, + Optional: true, + Description: "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. Example inputs include: ``.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceComputeFirewallPolicyRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: expandStringArray(d.Get("target_resources")), + TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + } + + id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, createDirective...) 
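+	// Nested blocks arrive here as generic Terraform data rather than typed
+	// structs: `match` is a MaxItems-1 list wrapping a map, which
+	// expandComputeFirewallPolicyRuleMatch (defined at the bottom of this
+	// file) unwraps into the DCL type. An illustrative input shape, with
+	// made-up values:
+	//
+	//	[]interface{}{map[string]interface{}{
+	//		"src_ip_ranges":  []interface{}{"10.0.0.0/8"},
+	//		"layer4_configs": []interface{}{map[string]interface{}{"ip_protocol": "tcp"}},
+	//	}}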
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating FirewallPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) + + return resourceComputeFirewallPolicyRuleRead(d, meta) +} + +func resourceComputeFirewallPolicyRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: expandStringArray(d.Get("target_resources")), + TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetFirewallPolicyRule(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ComputeFirewallPolicyRule %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("action", res.Action); err != nil { + return fmt.Errorf("error setting action in state: %s", err) + } + if err = d.Set("direction", res.Direction); err != nil { + return fmt.Errorf("error setting direction in state: %s", err) + } + if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { + return fmt.Errorf("error setting firewall_policy in state: %s", err) + } + if err = d.Set("match", flattenComputeFirewallPolicyRuleMatch(res.Match)); err != nil { + return fmt.Errorf("error setting match in state: %s", err) + } + if err = d.Set("priority", res.Priority); err != nil { + return fmt.Errorf("error setting priority in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("disabled", res.Disabled); err != nil { + return fmt.Errorf("error setting disabled in state: %s", err) + } + if err = d.Set("enable_logging", res.EnableLogging); err != nil { + return fmt.Errorf("error setting enable_logging in state: %s", err) + } + if err = d.Set("target_resources", res.TargetResources); err != nil { + return fmt.Errorf("error setting target_resources in state: %s", err) + } + if err = d.Set("target_service_accounts", res.TargetServiceAccounts); err != nil { + return fmt.Errorf("error setting target_service_accounts in state: %s", err) + } + if err = d.Set("kind", res.Kind); err != nil { + return fmt.Errorf("error setting kind in state: %s", err) + } + if err = d.Set("rule_tuple_count", 
res.RuleTupleCount); err != nil { + return fmt.Errorf("error setting rule_tuple_count in state: %s", err) + } + + return nil +} +func resourceComputeFirewallPolicyRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: expandStringArray(d.Get("target_resources")), + TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyFirewallPolicyRule(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating FirewallPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished updating FirewallPolicyRule %q: %#v", d.Id(), res) + + return resourceComputeFirewallPolicyRuleRead(d, meta) +} + +func resourceComputeFirewallPolicyRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &compute.FirewallPolicyRule{ + Action: dcl.String(d.Get("action").(string)), + Direction: compute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), + FirewallPolicy: dcl.String(d.Get("firewall_policy").(string)), + Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), + Priority: dcl.Int64(int64(d.Get("priority").(int))), + Description: dcl.String(d.Get("description").(string)), + Disabled: dcl.Bool(d.Get("disabled").(bool)), + EnableLogging: dcl.Bool(d.Get("enable_logging").(bool)), + TargetResources: expandStringArray(d.Get("target_resources")), + TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), + } + + log.Printf("[DEBUG] Deleting FirewallPolicyRule %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := 
client.DeleteFirewallPolicyRule(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting FirewallPolicyRule: %s", err) + } + + log.Printf("[DEBUG] Finished deleting FirewallPolicyRule %q", d.Id()) + return nil +} + +func resourceComputeFirewallPolicyRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "locations/global/firewallPolicies/(?P<firewall_policy>[^/]+)/rules/(?P<priority>[^/]+)", + "(?P<firewall_policy>[^/]+)/(?P<priority>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandComputeFirewallPolicyRuleMatch(o interface{}) *compute.FirewallPolicyRuleMatch { + if o == nil { + return compute.EmptyFirewallPolicyRuleMatch + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return compute.EmptyFirewallPolicyRuleMatch + } + obj := objArr[0].(map[string]interface{}) + return &compute.FirewallPolicyRuleMatch{ + Layer4Configs: expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), + DestIPRanges: expandStringArray(obj["dest_ip_ranges"]), + SrcIPRanges: expandStringArray(obj["src_ip_ranges"]), + } +} + +func flattenComputeFirewallPolicyRuleMatch(obj *compute.FirewallPolicyRuleMatch) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "layer4_configs": flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), + "dest_ip_ranges": obj.DestIPRanges, + "src_ip_ranges": obj.SrcIPRanges, + } + + return []interface{}{transformed} + +} +func expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(o interface{}) []compute.FirewallPolicyRuleMatchLayer4Configs { + if o == nil { + return make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0) + } + + items := make([]compute.FirewallPolicyRuleMatchLayer4Configs, 0, len(objs)) + for _, item := range objs { + i := expandComputeFirewallPolicyRuleMatchLayer4Configs(item) + items = append(items, *i) + } + + return items +} + +func expandComputeFirewallPolicyRuleMatchLayer4Configs(o interface{}) *compute.FirewallPolicyRuleMatchLayer4Configs { + if o == nil { + return compute.EmptyFirewallPolicyRuleMatchLayer4Configs + } + + obj := o.(map[string]interface{}) + return &compute.FirewallPolicyRuleMatchLayer4Configs{ + IPProtocol: dcl.String(obj["ip_protocol"].(string)), + Ports: expandStringArray(obj["ports"]), + } +} + +func flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(objs []compute.FirewallPolicyRuleMatchLayer4Configs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenComputeFirewallPolicyRuleMatchLayer4Configs(&item) + items = append(items, i) + } + + return items +} + +func flattenComputeFirewallPolicyRuleMatchLayer4Configs(obj *compute.FirewallPolicyRuleMatchLayer4Configs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ip_protocol": obj.IPProtocol, + "ports": obj.Ports, + } + + return transformed + +} diff --git a/google/resource_compute_forwarding_rule.go b/google/resource_compute_forwarding_rule.go index 
fe99248061e..1341d828ba4 100644 --- a/google/resource_compute_forwarding_rule.go +++ b/google/resource_compute_forwarding_rule.go @@ -1,28 +1,30 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: MMv1 *** +// *** AUTO GENERATED CODE *** Type: DCL *** // // ---------------------------------------------------------------------------- // -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. // -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose // // ---------------------------------------------------------------------------- package google import ( + "context" "fmt" "log" - "reflect" - "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" ) func resourceComputeForwardingRule() *schema.Resource { @@ -37,410 +39,254 @@ func resourceComputeForwardingRule() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), }, Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + }, + + "all_ports": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "This field is used along with the `backend_service` field for internal load balancing or with the `target` field for internal TargetInstance. This field cannot be used with `port` or `portRange` fields. 
When the load balancing scheme is `INTERNAL` and protocol is TCP/UDP, specify this field to allow packets addressed to any ports to be forwarded to the backends configured with this forwarding rule.", + }, + + "allow_global_access": { + Type: schema.TypeBool, + Optional: true, + Description: "This field is used along with the `backend_service` field for internal load balancing or with the `target` field for internal TargetInstance. If the field is set to `TRUE`, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", + }, + + "backend_service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "An optional description of this resource. Provide this property when you create the resource.", }, + "ip_address": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, DiffSuppressFunc: internalIpDiffSuppress, - Description: `The IP address that this forwarding rule serves. When a client sends -traffic to this IP address, the forwarding rule directs the traffic to -the target that you specify in the forwarding rule. The -loadBalancingScheme and the forwarding rule's target determine the -type of IP address that you can use. For detailed information, refer -to [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - -An address can be specified either by a literal IP address or a -reference to an existing Address resource. If you don't specify a -reserved IP address, an ephemeral IP address is assigned. - -The value must be set to 0.0.0.0 when the target is a targetGrpcProxy -that has validateForProxyless field set to true. - -For Private Service Connect forwarding rules that forward traffic to -Google APIs, IP address must be provided.`, + Description: "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name` * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", }, + "ip_protocol": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", "L3_DEFAULT", ""}, false), DiffSuppressFunc: caseDiffSuppress, - Description: `The IP protocol to which this rule applies. - -When the load balancing scheme is INTERNAL, only TCP and UDP are -valid.
Possible values: ["TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", "L3_DEFAULT"]`, - }, - "all_ports": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `This field can be used with internal load balancer or network load balancer -when the forwarding rule references a backend service, or with the target -field when it references a TargetInstance. Set this to true to -allow packets addressed to any ports to be forwarded to the backends configured -with this forwarding rule. This can be used when the protocol is TCP/UDP, and it -must be set to true when the protocol is set to L3_DEFAULT. -Cannot be set if port or portRange are set.`, - }, - "allow_global_access": { - Type: schema.TypeBool, - Optional: true, - Description: `If true, clients can access ILB from all regions. -Otherwise only allows from the local region the ILB is located at.`, - }, - "backend_service": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A BackendService to receive the matched traffic. This is used only -for INTERNAL load balancing.`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, + Description: "The IP protocol to which this rule applies. For protocol forwarding, valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. For Internal TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one of `TCP` or `UDP` are valid. For Traffic Director, the load balancing scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`, and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is `EXTERNAL` and only `TCP` is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one of `TCP` or `UDP` is valid.", }, + "is_mirroring_collector": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Indicates whether or not this load balancer can be used -as a collector for packet mirroring. To prevent mirroring loops, -instances behind this load balancer will not have their traffic -mirrored even if a PacketMirroring rule applies to them. This -can only be set to true for load balancers that have their -loadBalancingScheme set to INTERNAL.`, + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Indicates whether or not this load balancer can be used as a collector for packet mirroring. To prevent mirroring loops, instances behind this load balancer will not have their traffic mirrored even if a `PacketMirroring` rule applies to them. This can only be set to true for load balancers that have their `loadBalancingScheme` set to `INTERNAL`.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels to apply to this rule.", + Elem: &schema.Schema{Type: schema.TypeString}, }, + "load_balancing_scheme": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED", ""}, false), - Description: `This signifies what the ForwardingRule will be used for and can be -EXTERNAL, EXTERNAL_MANAGED, INTERNAL, or INTERNAL_MANAGED. 
EXTERNAL is used for Classic -Cloud VPN gateways, protocol forwarding to VMs from an external IP address, -and HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP load balancers. -INTERNAL is used for protocol forwarding to VMs from an internal IP address, -and internal TCP/UDP load balancers. -EXTERNAL_MANAGED is used for regional external HTTP(S) load balancers. -INTERNAL_MANAGED is used for internal HTTP(S) load balancers. Default value: "EXTERNAL" Possible values: ["EXTERNAL", "EXTERNAL_MANAGED", "INTERNAL", "INTERNAL_MANAGED"]`, - Default: "EXTERNAL", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Specifies the forwarding rule type.\n\n* `EXTERNAL` is used for:\n * Classic Cloud VPN gateways\n * Protocol forwarding to VMs from an external IP address\n * The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\n* `INTERNAL` is used for:\n * Protocol forwarding to VMs from an internal IP address\n * Internal TCP/UDP load balancers\n* `INTERNAL_MANAGED` is used for:\n * Internal HTTP(S) load balancers\n* `INTERNAL_SELF_MANAGED` is used for:\n * Traffic Director\n* `EXTERNAL_MANAGED` is used for:\n * Global external HTTP(S) load balancers \n\nFor more information about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts). Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED, EXTERNAL, EXTERNAL_MANAGED", + Default: "EXTERNAL", }, + "network": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `For internal load balancing, this field identifies the network that -the load balanced IP should belong to for this Forwarding Rule. If -this field is not specified, the default network will be used. -This field is only used for INTERNAL load balancing.`, + Description: "This field is not used for external load balancing. For `INTERNAL` and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", }, + "network_tier": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD", ""}, false), - Description: `The networking tier used for configuring this address. If this field is not -specified, it is assumed to be PREMIUM. Possible values: ["PREMIUM", "STANDARD"]`, + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "This signifies the networking tier used for configuring this load balancer and can only take the following values: `PREMIUM`, `STANDARD`. For regional ForwardingRule, the valid values are `PREMIUM` and `STANDARD`. For GlobalForwardingRule, the valid value is `PREMIUM`. If this field is not specified, it is assumed to be `PREMIUM`. If `IPAddress` is specified, this value must be equal to the networkTier of the Address.", }, + "port_range": { Type: schema.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: portRangeDiffSuppress, - Description: `This field is used along with the target field for TargetHttpProxy, -TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, -TargetPool, TargetInstance. - -Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets -addressed to ports in the specified range will be forwarded to target. 
-Forwarding rules with the same [IPAddress, IPProtocol] pair must have -disjoint port ranges. - -Some types of forwarding target have constraints on the acceptable -ports: - -* TargetHttpProxy: 80, 8080 -* TargetHttpsProxy: 443 -* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 -* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 -* TargetVpnGateway: 500, 4500`, + Description: "When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. Some types of forwarding target have constraints on the acceptable ports:\n\n* TargetHttpProxy: 80, 8080\n* TargetHttpsProxy: 443\n* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetVpnGateway: 500, 4500\n\n@pattern: \d+(?:-\d+)?", }, + "ports": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Description: `This field is used along with internal load balancing and network -load balancer when the forwarding rule references a backend service -and when protocol is not L3_DEFAULT. - -A single port or a comma separated list of ports can be configured. -Only packets addressed to these ports will be forwarded to the backends -configured with this forwarding rule. - -You can only use one of ports and portRange, or allPorts. -The three are mutually exclusive. - -You may specify a maximum of up to 5 ports, which can be non-contiguous.`, - MaxItems: 5, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "This field is used along with the `backend_service` field for internal load balancing. When the load balancing scheme is `INTERNAL`, a list of ports can be configured, for example, ['80'], ['8000','9000']. Only packets addressed to these ports are forwarded to the backends configured with the forwarding rule. If the forwarding rule's loadBalancingScheme is INTERNAL, you can specify ports in one of the following ways: * A list of up to five ports, which can be non-contiguous * Keyword `ALL`, which causes the forwarding rule to forward traffic on any port of the forwarding rule's protocol. @pattern: \d+(?:-\d+)? For more information, refer to [Port specifications](/load-balancing/docs/forwarding-rule-concepts#port_specifications).", + MaxItems: 5, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project this resource belongs in.", + }, + "region": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the regional forwarding rule resides.
-This field is not applicable to global forwarding rules.`, + Description: "The location of this resource.", }, + "service_label": { Type: schema.TypeString, Optional: true, ForceNew: true, + Description: "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", ValidateFunc: validateGCPName, - Description: `An optional prefix to the service name for this Forwarding Rule. -If specified, will be the first label of the fully qualified service -name. - -The label must be 1-63 characters long, and comply with RFC1035. -Specifically, the label must be 1-63 characters long and match the -regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first -character must be a lowercase letter, and all following characters -must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. - -This field is only used for INTERNAL load balancing.`, }, + "subnetwork": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The subnetwork that the load balanced IP should belong to for this -Forwarding Rule. This field is only used for INTERNAL load balancing. - -If the network specified is in auto subnet mode, this field is -optional. However, if the network is in custom subnet mode, a -subnetwork must be specified.`, + Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule. If the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", }, + "target": { Type: schema.TypeString, Optional: true, DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The URL of the target resource to receive the matched traffic. -The target must live in the same region as the forwarding rule. -The forwarded traffic must be of a type appropriate to the target -object.`, + Description: "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For `INTERNAL_SELF_MANAGED` load balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.", }, + "creation_timestamp": { Type: schema.TypeString, Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "service_name": { - Type: schema.TypeString, - Computed: true, - Description: `The internal fully qualified service name for this Forwarding Rule. 
-This field is only used for INTERNAL load balancing.`, + Description: "[Output Only] Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: "Used internally during label updates.", }, + "self_link": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "[Output Only] Server-defined URL for the resource.", + }, + + "service_name": { + Type: schema.TypeString, + Computed: true, + Description: "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", }, }, - UseJSONNumber: true, } } func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - isMirroringCollectorProp, err := expandComputeForwardingRuleIsMirroringCollector(d.Get("is_mirroring_collector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_mirroring_collector"); !isEmptyValue(reflect.ValueOf(isMirroringCollectorProp)) && (ok || !reflect.DeepEqual(v, isMirroringCollectorProp)) { - obj["isMirroringCollector"] = isMirroringCollectorProp - } - descriptionProp, err := expandComputeForwardingRuleDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - IPAddressProp, err := expandComputeForwardingRuleIPAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(reflect.ValueOf(IPAddressProp)) && (ok || !reflect.DeepEqual(v, IPAddressProp)) { - obj["IPAddress"] = IPAddressProp - } - IPProtocolProp, err := expandComputeForwardingRuleIPProtocol(d.Get("ip_protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_protocol"); !isEmptyValue(reflect.ValueOf(IPProtocolProp)) && (ok || !reflect.DeepEqual(v, IPProtocolProp)) { - obj["IPProtocol"] = IPProtocolProp - } - backendServiceProp, err := expandComputeForwardingRuleBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(reflect.ValueOf(backendServiceProp)) && (ok || !reflect.DeepEqual(v, backendServiceProp)) { - obj["backendService"] = backendServiceProp - } - loadBalancingSchemeProp, err := expandComputeForwardingRuleLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { - obj["loadBalancingScheme"] = loadBalancingSchemeProp - } - nameProp, err := expandComputeForwardingRuleName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeForwardingRuleNetwork(d.Get("network"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - portRangeProp, err := expandComputeForwardingRulePortRange(d.Get("port_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port_range"); !isEmptyValue(reflect.ValueOf(portRangeProp)) && (ok || !reflect.DeepEqual(v, portRangeProp)) { - obj["portRange"] = portRangeProp - } - portsProp, err := expandComputeForwardingRulePorts(d.Get("ports"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ports"); !isEmptyValue(reflect.ValueOf(portsProp)) && (ok || !reflect.DeepEqual(v, portsProp)) { - obj["ports"] = portsProp - } - subnetworkProp, err := expandComputeForwardingRuleSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(reflect.ValueOf(subnetworkProp)) && (ok || !reflect.DeepEqual(v, subnetworkProp)) { - obj["subnetwork"] = subnetworkProp - } - targetProp, err := expandComputeForwardingRuleTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - allowGlobalAccessProp, err := expandComputeForwardingRuleAllowGlobalAccess(d.Get("allow_global_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow_global_access"); ok || !reflect.DeepEqual(v, allowGlobalAccessProp) { - obj["allowGlobalAccess"] = allowGlobalAccessProp - } - allPortsProp, err := expandComputeForwardingRuleAllPorts(d.Get("all_ports"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("all_ports"); !isEmptyValue(reflect.ValueOf(allPortsProp)) && (ok || !reflect.DeepEqual(v, allPortsProp)) { - obj["allPorts"] = allPortsProp - } - networkTierProp, err := expandComputeForwardingRuleNetworkTier(d.Get("network_tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_tier"); !isEmptyValue(reflect.ValueOf(networkTierProp)) && (ok || !reflect.DeepEqual(v, networkTierProp)) { - obj["networkTier"] = networkTierProp - } - serviceLabelProp, err := expandComputeForwardingRuleServiceLabel(d.Get("service_label"), d, config) + project, err := getProject(d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("service_label"); !isEmptyValue(reflect.ValueOf(serviceLabelProp)) && (ok || !reflect.DeepEqual(v, serviceLabelProp)) { - obj["serviceLabel"] = serviceLabelProp } - regionProp, err := expandComputeForwardingRuleRegion(d.Get("region"), d, config) + region, err := getRegion(d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules") - if err != nil { - return err + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + AllPorts: dcl.Bool(d.Get("all_ports").(bool)), + AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), + BackendService: dcl.String(d.Get("backend_service").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: 
compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + Network: dcl.StringOrNil(d.Get("network").(string)), + NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Ports: expandStringArray(d.Get("ports")), + Project: dcl.String(project), + Location: dcl.String(region), + ServiceLabel: dcl.String(d.Get("service_label").(string)), + Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), + Target: dcl.String(d.Get("target").(string)), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) } - - log.Printf("[DEBUG] Creating new ForwardingRule: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { - return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + return err } - billingProject = project - + billingProject := project // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating ForwardingRule: %s", err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating ForwardingRule", userAgent, - d.Timeout(schema.TimeoutCreate)) + res, err := client.ApplyForwardingRule(context.Background(), obj, createDirective...) 
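
The create path assembled above is the template this PR applies to every DCL-backed resource: build a fully typed `compute.ForwardingRule` from configuration, construct and set the Terraform ID before calling the API, then `Apply` under `CreateDirective` so the DCL is only permitted to create, never to mutate or destroy an existing resource. A condensed sketch of that flow follows; the helper name `applyForwardingRule` and its trimmed signature are illustrative, not provider API, though the DCL calls themselves are the ones used in this file.

```go
package google

import (
	"context"
	"fmt"
	"log"

	dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute"
)

// applyForwardingRule condenses the generated create/update flow: a single
// Apply call whose lifecycle directive decides which transitions (create,
// modify, destroy) the DCL may perform. Illustrative only; the provider
// inlines this logic in each generated CRUD function.
func applyForwardingRule(client *compute.Client, obj *compute.ForwardingRule, directive []dcl.ApplyOption) (*compute.ForwardingRule, error) {
	res, err := client.ApplyForwardingRule(context.Background(), obj, directive...)
	if _, ok := err.(dcl.DiffAfterApplyError); ok {
		// Non-fatal: the applied resource still differs from the desired
		// state; the generated code logs this and proceeds to Read.
		log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err)
		return res, nil
	}
	if err != nil {
		return nil, fmt.Errorf("error applying ForwardingRule: %s", err)
	}
	return res, nil
}
```

The update path below reuses the same shape with `UpdateDirective`, which blocks creation and destruction instead of modification.
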
- if err != nil { + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create ForwardingRule: %s", err) + return fmt.Errorf("Error creating ForwardingRule: %s", err) } log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) @@ -450,235 +296,251 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) + project, err := getProject(d, config) if err != nil { return err } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + region, err := getRegion(d, config) if err != nil { return err } - billingProject := "" + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + AllPorts: dcl.Bool(d.Get("all_ports").(bool)), + AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), + BackendService: dcl.String(d.Get("backend_service").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + Network: dcl.StringOrNil(d.Get("network").(string)), + NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Ports: expandStringArray(d.Get("ports")), + Project: dcl.String(project), + Location: dcl.String(region), + ServiceLabel: dcl.String(d.Get("service_label").(string)), + Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), + Target: dcl.String(d.Get("target").(string)), + } - project, err := getProject(d, config) + userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { - return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + return err } - billingProject = project - + billingProject := project // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetForwardingRule(context.Background(), obj) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeForwardingRule %q", d.Id())) + resourceName := fmt.Sprintf("ComputeForwardingRule %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) } - - if err := d.Set("creation_timestamp", 
flattenComputeForwardingRuleCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("all_ports", res.AllPorts); err != nil { + return fmt.Errorf("error setting all_ports in state: %s", err) } - if err := d.Set("is_mirroring_collector", flattenComputeForwardingRuleIsMirroringCollector(res["isMirroringCollector"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("allow_global_access", res.AllowGlobalAccess); err != nil { + return fmt.Errorf("error setting allow_global_access in state: %s", err) } - if err := d.Set("description", flattenComputeForwardingRuleDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("backend_service", res.BackendService); err != nil { + return fmt.Errorf("error setting backend_service in state: %s", err) } - if err := d.Set("ip_address", flattenComputeForwardingRuleIPAddress(res["IPAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) } - if err := d.Set("ip_protocol", flattenComputeForwardingRuleIPProtocol(res["IPProtocol"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("ip_address", res.IPAddress); err != nil { + return fmt.Errorf("error setting ip_address in state: %s", err) } - if err := d.Set("backend_service", flattenComputeForwardingRuleBackendService(res["backendService"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("ip_protocol", res.IPProtocol); err != nil { + return fmt.Errorf("error setting ip_protocol in state: %s", err) } - if err := d.Set("load_balancing_scheme", flattenComputeForwardingRuleLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("is_mirroring_collector", res.IsMirroringCollector); err != nil { + return fmt.Errorf("error setting is_mirroring_collector in state: %s", err) } - if err := d.Set("name", flattenComputeForwardingRuleName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) } - if err := d.Set("network", flattenComputeForwardingRuleNetwork(res["network"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("load_balancing_scheme", res.LoadBalancingScheme); err != nil { + return fmt.Errorf("error setting load_balancing_scheme in state: %s", err) } - if err := d.Set("port_range", flattenComputeForwardingRulePortRange(res["portRange"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("network", res.Network); err != nil { + return fmt.Errorf("error setting network in state: %s", err) } - if err := d.Set("ports", flattenComputeForwardingRulePorts(res["ports"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("network_tier", res.NetworkTier); err != nil { + return fmt.Errorf("error setting network_tier in state: %s", err) } - if err := d.Set("subnetwork", 
flattenComputeForwardingRuleSubnetwork(res["subnetwork"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("port_range", res.PortRange); err != nil { + return fmt.Errorf("error setting port_range in state: %s", err) } - if err := d.Set("target", flattenComputeForwardingRuleTarget(res["target"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("ports", res.Ports); err != nil { + return fmt.Errorf("error setting ports in state: %s", err) } - if err := d.Set("allow_global_access", flattenComputeForwardingRuleAllowGlobalAccess(res["allowGlobalAccess"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) } - if err := d.Set("all_ports", flattenComputeForwardingRuleAllPorts(res["allPorts"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("region", res.Location); err != nil { + return fmt.Errorf("error setting region in state: %s", err) } - if err := d.Set("network_tier", flattenComputeForwardingRuleNetworkTier(res["networkTier"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("service_label", res.ServiceLabel); err != nil { + return fmt.Errorf("error setting service_label in state: %s", err) } - if err := d.Set("service_label", flattenComputeForwardingRuleServiceLabel(res["serviceLabel"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("subnetwork", res.Subnetwork); err != nil { + return fmt.Errorf("error setting subnetwork in state: %s", err) } - if err := d.Set("service_name", flattenComputeForwardingRuleServiceName(res["serviceName"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("target", res.Target); err != nil { + return fmt.Errorf("error setting target in state: %s", err) } - if err := d.Set("region", flattenComputeForwardingRuleRegion(res["region"], d, config)); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("creation_timestamp", res.CreationTimestamp); err != nil { + return fmt.Errorf("error setting creation_timestamp in state: %s", err) } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading ForwardingRule: %s", err) + if err = d.Set("label_fingerprint", res.LabelFingerprint); err != nil { + return fmt.Errorf("error setting label_fingerprint in state: %s", err) + } + if err = d.Set("self_link", res.SelfLink); err != nil { + return fmt.Errorf("error setting self_link in state: %s", err) + } + if err = d.Set("service_name", res.ServiceName); err != nil { + return fmt.Errorf("error setting service_name in state: %s", err) } return nil } - func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + AllPorts: dcl.Bool(d.Get("all_ports").(bool)), + AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), + BackendService: dcl.String(d.Get("backend_service").(string)), + Description: 
dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + Network: dcl.StringOrNil(d.Get("network").(string)), + NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Ports: expandStringArray(d.Get("ports")), + Project: dcl.String(project), + Location: dcl.String(region), + ServiceLabel: dcl.String(d.Get("service_label").(string)), + Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), + Target: dcl.String(d.Get("target").(string)), + } + directive := UpdateDirective userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err } billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("target") { - obj := make(map[string]interface{}) - - targetProp, err := expandComputeForwardingRuleTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setTarget") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating ForwardingRule %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating ForwardingRule", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("allow_global_access") { - obj := make(map[string]interface{}) - - allowGlobalAccessProp, err := expandComputeForwardingRuleAllowGlobalAccess(d.Get("allow_global_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow_global_access"); ok || !reflect.DeepEqual(v, allowGlobalAccessProp) { - obj["allowGlobalAccess"] = allowGlobalAccessProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - 
if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating ForwardingRule %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating ForwardingRule", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating ForwardingRule: %s", err) + } + + log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) return resourceComputeForwardingRuleRead(d, meta) } func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) + project, err := getProject(d, config) if err != nil { return err } - - billingProject := "" - - project, err := getProject(d, config) + region, err := getRegion(d, config) if err != nil { - return fmt.Errorf("Error fetching project for ForwardingRule: %s", err) + return err } - billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - if err != nil { - return err + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + AllPorts: dcl.Bool(d.Get("all_ports").(bool)), + AllowGlobalAccess: dcl.Bool(d.Get("allow_global_access").(bool)), + BackendService: dcl.String(d.Get("backend_service").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IsMirroringCollector: dcl.Bool(d.Get("is_mirroring_collector").(bool)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + Network: dcl.StringOrNil(d.Get("network").(string)), + NetworkTier: compute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Ports: expandStringArray(d.Get("ports")), + Project: dcl.String(project), + Location: dcl.String(region), + ServiceLabel: dcl.String(d.Get("service_label").(string)), + Subnetwork: dcl.StringOrNil(d.Get("subnetwork").(string)), + Target: dcl.String(d.Get("target").(string)), } - var obj map[string]interface{} log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) - + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ForwardingRule") + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { +
d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp } - - err = computeOperationWaitTime( - config, res, project, "Deleting ForwardingRule", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err + if err := client.DeleteForwardingRule(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - log.Printf("[DEBUG] Finished deleting ForwardingRule %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting ForwardingRule %q", d.Id()) return nil } @@ -694,7 +556,7 @@ func resourceComputeForwardingRuleImport(d *schema.ResourceData, meta interface{ } // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -702,232 +564,3 @@ func resourceComputeForwardingRuleImport(d *schema.ResourceData, meta interface{ return []*schema.ResourceData{d}, nil } - -func flattenComputeForwardingRuleCreationTimestamp(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleIsMirroringCollector(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleIPAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleIPProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleBackendService(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeForwardingRuleLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeForwardingRulePortRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRulePorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return schema.NewSet(schema.HashString, v.([]interface{})) -} - -func flattenComputeForwardingRuleSubnetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeForwardingRuleTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleAllowGlobalAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleAllPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleNetworkTier(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeForwardingRuleServiceLabel(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleServiceName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeForwardingRuleRegion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeForwardingRuleIsMirroringCollector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleIPAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleIPProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - // This method returns a full self link from a partial self link. - if v == nil || v.(string) == "" { - // It does not try to construct anything from empty. - return "", nil - } else if strings.HasPrefix(v.(string), "https://") { - // Anything that starts with a URL scheme is assumed to be a self link worth using. - return v, nil - } else if strings.HasPrefix(v.(string), "projects/") { - // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { - // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - // Anything else is assumed to be a regional resource, with a partial link that begins with the resource name. - // This isn't very likely - it's a last-ditch effort to extract something useful here. We can do a better job - // as soon as MultiResourceRefs are working since we'll know the types that this field is supposed to point to. 
- url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil -} - -func expandComputeForwardingRuleLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeForwardingRulePortRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRulePorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v.(*schema.Set).List(), nil -} - -func expandComputeForwardingRuleSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for subnetwork: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeForwardingRuleTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - // This method returns a full self link from a partial self link. - if v == nil || v.(string) == "" { - // It does not try to construct anything from empty. - return "", nil - } else if strings.HasPrefix(v.(string), "https://") { - // Anything that starts with a URL scheme is assumed to be a self link worth using. - return v, nil - } else if strings.HasPrefix(v.(string), "projects/") { - // If the self link references a project, we'll just stuck the compute prefix on it - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if strings.HasPrefix(v.(string), "regions/") || strings.HasPrefix(v.(string), "zones/") { - // For regional or zonal resources which include their region or zone, just put the project in front. - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - // Anything else is assumed to be a regional resource, with a partial link that begins with the resource name. - // This isn't very likely - it's a last-ditch effort to extract something useful here. We can do a better job - // as soon as MultiResourceRefs are working since we'll know the types that this field is supposed to point to. 
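
The expanders removed in this hunk, `expandComputeForwardingRuleBackendService` above and `expandComputeForwardingRuleTarget` (whose final fallback lines continue just below), both embedded the same partial-self-link normalization, which now lives inside the DCL rather than the provider. Restated as a standalone sketch, with `computeBasePath`, `project`, and `region` standing in for the resolved `{{ComputeBasePath}}`, `{{project}}`, and `{{region}}` template values; the helper is illustrative and not part of the provider:

```go
package google

import "strings"

// normalizeComputeSelfLink restates the prefix rules of the removed
// expanders: pass full URLs through, prepend the compute base path to
// project-rooted paths, add the project for regional/zonal paths, and as a
// last resort treat a bare name as a regional resource.
func normalizeComputeSelfLink(v, computeBasePath, project, region string) string {
	switch {
	case v == "":
		// Nothing can be constructed from an empty value.
		return ""
	case strings.HasPrefix(v, "https://"):
		// Anything with a URL scheme is assumed to already be a self link.
		return v
	case strings.HasPrefix(v, "projects/"):
		// Project-rooted partial links only need the compute prefix.
		return computeBasePath + v
	case strings.HasPrefix(v, "regions/"), strings.HasPrefix(v, "zones/"):
		// Regional or zonal partial links also need the project in front.
		return computeBasePath + "projects/" + project + "/" + v
	default:
		// Last-ditch guess, as the original comment admits: assume a
		// regional resource referenced by bare name.
		return computeBasePath + "projects/" + project + "/regions/" + region + "/" + v
	}
}
```

As the removed comments note, this final guess was meant to disappear once MultiResourceRefs could supply the referenced types.
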
- url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil -} - -func expandComputeForwardingRuleAllowGlobalAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleAllPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleNetworkTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleServiceLabel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeForwardingRuleRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} diff --git a/google/resource_compute_forwarding_rule_sweeper_test.go b/google/resource_compute_forwarding_rule_sweeper_test.go index cb3c5a941a2..a997b6fc8f2 100644 --- a/google/resource_compute_forwarding_rule_sweeper_test.go +++ b/google/resource_compute_forwarding_rule_sweeper_test.go @@ -1,14 +1,15 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: MMv1 *** +// *** AUTO GENERATED CODE *** Type: DCL *** // // ---------------------------------------------------------------------------- // -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. // -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose // // ---------------------------------------------------------------------------- @@ -17,9 +18,9 @@ package google import ( "context" "log" - "strings" "testing" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -30,10 +31,8 @@ func init() { }) } -// At the time of writing, the CI only passes us-central1 as the region func testSweepComputeForwardingRule(region string) error { - resourceName := "ComputeForwardingRule" - log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ComputeForwardingRule") config, err := sharedConfigForRegion(region) if err != nil { @@ -50,75 +49,23 @@ func testSweepComputeForwardingRule(region string) error { t := &testing.T{} billingId := getTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, + // Setup variables to be used for Delete arguments. 
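+ // Only the "project" and "location" entries are consumed by the DeleteAllForwardingRule call below; the other keys appear to be kept for parity with the ResourceDataMock fields used by other sweepers.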
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, } - listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/forwardingRules", "?")[0] - listUrl, err := replaceVars(d, config, listTemplate) + client := NewDCLComputeClient(config, config.userAgent, "", 0) + err = client.DeleteAllForwardingRule(context.Background(), d["project"], d["location"], isDeletableComputeForwardingRule) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["items"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil - } - - name := GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !isSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}" - deleteUrl, err := replaceVars(d, config, deleteTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil - } - deleteUrl = deleteUrl + name - - // Don't wait on operations as we may have a lot to delete - _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) - } - } - - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + return err } - return nil } + +func isDeletableComputeForwardingRule(r *compute.ForwardingRule) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_compute_global_forwarding_rule.go b/google/resource_compute_global_forwarding_rule.go index 9ec7061c106..034ad98e54f 100644 --- a/google/resource_compute_global_forwarding_rule.go +++ b/google/resource_compute_global_forwarding_rule.go @@ -1,27 +1,30 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: MMv1 *** +// *** AUTO GENERATED CODE *** Type: DCL *** // // ---------------------------------------------------------------------------- // -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. 
// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose // // ---------------------------------------------------------------------------- package google import ( + "context" "fmt" "log" - "reflect" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" ) func resourceComputeGlobalForwardingRule() *schema.Resource { @@ -36,484 +39,411 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(4 * time.Minute), - Update: schema.DefaultTimeout(4 * time.Minute), - Delete: schema.DefaultTimeout(4 * time.Minute), + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), }, Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", }, + "target": { Type: schema.TypeString, Required: true, DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The URL of the target resource to receive the matched traffic. -The forwarded traffic must be of a type appropriate to the target object. -For INTERNAL_SELF_MANAGED load balancing, only HTTP and HTTPS targets -are valid. + Description: "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For `INTERNAL_SELF_MANAGED` load balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.", + }, -([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) only) For global address with a purpose of PRIVATE_SERVICE_CONNECT and -addressType of INTERNAL, only "all-apis" and "vpc-sc" are valid.`, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "An optional description of this resource. 
Provide this property when you create the resource.", }, + "ip_address": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, DiffSuppressFunc: internalIpDiffSuppress, - Description: `The IP address that this forwarding rule serves. When a client sends -traffic to this IP address, the forwarding rule directs the traffic to -the target that you specify in the forwarding rule. The -loadBalancingScheme and the forwarding rule's target determine the -type of IP address that you can use. For detailed information, refer -to [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - -An address can be specified either by a literal IP address or a -reference to an existing Address resource. If you don't specify a -reserved IP address, an ephemeral IP address is assigned. - -The value must be set to 0.0.0.0 when the target is a targetGrpcProxy -that has validateForProxyless field set to true. - -For Private Service Connect forwarding rules that forward traffic to -Google APIs, IP address must be provided.`, + Description: "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name` * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", }, + "ip_protocol": { Type: schema.TypeString, Computed: true, Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"TCP", "UDP", "ESP", "AH", "SCTP", "ICMP", ""}, false), DiffSuppressFunc: caseDiffSuppress, - Description: `The IP protocol to which this rule applies. When the load balancing scheme is -INTERNAL_SELF_MANAGED, only TCP is valid. This field must not be set if the -global address is configured as a purpose of PRIVATE_SERVICE_CONNECT -and addressType of INTERNAL Possible values: ["TCP", "UDP", "ESP", "AH", "SCTP", "ICMP"]`, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, + Description: "The IP protocol to which this rule applies. For protocol forwarding, valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. For Internal TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one of `TCP` or `UDP` is valid. For Traffic Director, the load balancing scheme is `INTERNAL_SELF_MANAGED`, and only `TCP` is valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`, and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is `EXTERNAL` and only `TCP` is valid.
For Network TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one of `TCP` or `UDP` is valid.", }, + "ip_version": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"IPV4", "IPV6", ""}, false), - Description: `The IP Version that will be used by this global forwarding rule. Possible values: ["IPV4", "IPV6"]`, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The IP Version that will be used by this forwarding rule. Valid options are `IPV4` or `IPV6`. This can only be specified for an external global forwarding rule. Possible values: UNSPECIFIED_VERSION, IPV4, IPV6", }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels to apply to this rule.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "load_balancing_scheme": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"EXTERNAL", "INTERNAL_SELF_MANAGED", ""}, false), - Description: `This signifies what the GlobalForwardingRule will be used for. -The value of INTERNAL_SELF_MANAGED means that this will be used for -Internal Global HTTP(S) LB. The value of EXTERNAL means that this -will be used for External Global Load Balancing (HTTP(S) LB, -External TCP/UDP LB, SSL Proxy) - -([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) only) Note: This field must be set "" if the global address is -configured as a purpose of PRIVATE_SERVICE_CONNECT and addressType of INTERNAL. Default value: "EXTERNAL" Possible values: ["EXTERNAL", "INTERNAL_SELF_MANAGED"]`, - Default: "EXTERNAL", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Specifies the forwarding rule type.\n\n* `EXTERNAL` is used for:\n * Classic Cloud VPN gateways\n * Protocol forwarding to VMs from an external IP address\n * The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\n* `INTERNAL` is used for:\n * Protocol forwarding to VMs from an internal IP address\n * Internal TCP/UDP load balancers\n* `INTERNAL_MANAGED` is used for:\n * Internal HTTP(S) load balancers\n* `INTERNAL_SELF_MANAGED` is used for:\n * Traffic Director\n* `EXTERNAL_MANAGED` is used for:\n * Global external HTTP(S) load balancers \n\nFor more information about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts). Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED, EXTERNAL, EXTERNAL_MANAGED", + Default: "EXTERNAL", }, + "metadata_filters": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Opaque filter criteria used by Loadbalancer to restrict routing -configuration to a limited set xDS compliant clients. In their xDS -requests to Loadbalancer, xDS clients present node metadata. If a -match takes place, the relevant routing configuration is made available -to those proxies. - -For each metadataFilter in this list, if its filterMatchCriteria is set -to MATCH_ANY, at least one of the filterLabels must match the -corresponding label provided in the metadata. If its filterMatchCriteria -is set to MATCH_ALL, then all of its filterLabels must match with -corresponding labels in the provided metadata. - -metadataFilters specified here can be overridden by those specified in -the UrlMap that this ForwardingRule references. 
- -metadataFilters only applies to Loadbalancers that have their -loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter_labels": { - Type: schema.TypeList, - Required: true, - ForceNew: true, - Description: `The list of label value pairs that must match labels in the -provided metadata based on filterMatchCriteria - -This list must not be empty and can have at the most 64 entries.`, - MinItems: 1, - MaxItems: 64, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the metadata label. The length must be between -1 and 1024 characters, inclusive.`, - }, - "value": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The value that the label must match. The value has a maximum -length of 1024 characters.`, - }, - }, - }, - }, - "filter_match_criteria": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"MATCH_ANY", "MATCH_ALL"}, false), - Description: `Specifies how individual filterLabel matches within the list of -filterLabels contribute towards the overall metadataFilter match. - -MATCH_ANY - At least one of the filterLabels must have a matching -label in the provided metadata. -MATCH_ALL - All filterLabels must have matching labels in the -provided metadata. Possible values: ["MATCH_ANY", "MATCH_ALL"]`, - }, - }, - }, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of [xDS](https://github.com/envoyproxy/data-plane-api/blob/master/XDS_PROTOCOL.md) compliant clients. In their xDS requests to Loadbalancer, xDS clients present [node metadata](https://github.com/envoyproxy/data-plane-api/search?q=%22message+Node%22+in%3A%2Fenvoy%2Fapi%2Fv2%2Fcore%2Fbase.proto&). If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. `TargetHttpProxy`, `UrlMap`) referenced by the `ForwardingRule` will not be visible to those proxies.\n\nFor each `metadataFilter` in this list, if its `filterMatchCriteria` is set to MATCH_ANY, at least one of the `filterLabel`s must match the corresponding label provided in the metadata. If its `filterMatchCriteria` is set to MATCH_ALL, then all of its `filterLabel`s must match with corresponding labels provided in the metadata.\n\n`metadataFilters` specified here will be applied before those specified in the `UrlMap` that this `ForwardingRule` references.\n\n`metadataFilters` only applies to Loadbalancers that have their loadBalancingScheme set to `INTERNAL_SELF_MANAGED`.", + Elem: ComputeGlobalForwardingRuleMetadataFilterSchema(), + }, + + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "This field is not used for external load balancing. For `INTERNAL` and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule.
If this field is not specified, the default network will be used.", }, + "port_range": { Type: schema.TypeString, Optional: true, ForceNew: true, DiffSuppressFunc: portRangeDiffSuppress, - Description: `This field is used along with the target field for TargetHttpProxy, -TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, -TargetPool, TargetInstance. - -Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets -addressed to ports in the specified range will be forwarded to target. -Forwarding rules with the same [IPAddress, IPProtocol] pair must have -disjoint port ranges. - -Some types of forwarding target have constraints on the acceptable -ports: - -* TargetHttpProxy: 80, 8080 -* TargetHttpsProxy: 443 -* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 -* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 -* TargetVpnGateway: 500, 4500`, + Description: "When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`; only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. Some types of forwarding target have constraints on the acceptable ports:\n\n* TargetHttpProxy: 80, 8080\n* TargetHttpsProxy: 443\n* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetVpnGateway: 500, 4500\n\n@pattern: \d+(?:-\d+)?", }, + "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project this resource belongs in.", + }, + + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: "Used internally during label updates.", }, + "self_link": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "[Output Only] Server-defined URL for the resource.", }, }, - UseJSONNumber: true, } } -func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } +func ComputeGlobalForwardingRuleMetadataFilterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter_labels": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The list of label value pairs that must match labels in the provided metadata based on `filterMatchCriteria`\n\nThis list must not be empty and can have at most 64 entries.", + MaxItems: 64, + MinItems: 1, + Elem: ComputeGlobalForwardingRuleMetadataFilterFilterLabelSchema(), + }, - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeGlobalForwardingRuleDescription(d.Get("description"), d, config) - if err != nil { - return err -
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - IPAddressProp, err := expandComputeGlobalForwardingRuleIPAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(reflect.ValueOf(IPAddressProp)) && (ok || !reflect.DeepEqual(v, IPAddressProp)) { - obj["IPAddress"] = IPAddressProp - } - IPProtocolProp, err := expandComputeGlobalForwardingRuleIPProtocol(d.Get("ip_protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_protocol"); !isEmptyValue(reflect.ValueOf(IPProtocolProp)) && (ok || !reflect.DeepEqual(v, IPProtocolProp)) { - obj["IPProtocol"] = IPProtocolProp - } - ipVersionProp, err := expandComputeGlobalForwardingRuleIpVersion(d.Get("ip_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_version"); !isEmptyValue(reflect.ValueOf(ipVersionProp)) && (ok || !reflect.DeepEqual(v, ipVersionProp)) { - obj["ipVersion"] = ipVersionProp - } - loadBalancingSchemeProp, err := expandComputeGlobalForwardingRuleLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !reflect.DeepEqual(v, loadBalancingSchemeProp)) { - obj["loadBalancingScheme"] = loadBalancingSchemeProp - } - metadataFiltersProp, err := expandComputeGlobalForwardingRuleMetadataFilters(d.Get("metadata_filters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata_filters"); !isEmptyValue(reflect.ValueOf(metadataFiltersProp)) && (ok || !reflect.DeepEqual(v, metadataFiltersProp)) { - obj["metadataFilters"] = metadataFiltersProp + "filter_match_criteria": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Specifies how individual `filterLabel` matches within the list of `filterLabel`s contribute towards the overall `metadataFilter` match.\n\nSupported values are:\n\n* MATCH_ANY: At least one of the `filterLabels` must have a matching label in the provided metadata.\n* MATCH_ALL: All `filterLabels` must have matching labels in the provided metadata. 
Possible values: NOT_SET, MATCH_ALL, MATCH_ANY", + }, + }, } - nameProp, err := expandComputeGlobalForwardingRuleName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp +} + +func ComputeGlobalForwardingRuleMetadataFilterFilterLabelSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of metadata label.\n\nThe name can have a maximum length of 1024 characters and must be at least 1 character long.", + }, + + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The value of the label must match the specified value.\n\nvalue can have a maximum length of 1024 characters.", + }, + }, } - portRangeProp, err := expandComputeGlobalForwardingRulePortRange(d.Get("port_range"), d, config) +} + +func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) if err != nil { return err - } else if v, ok := d.GetOkExists("port_range"); !isEmptyValue(reflect.ValueOf(portRangeProp)) && (ok || !reflect.DeepEqual(v, portRangeProp)) { - obj["portRange"] = portRangeProp } - targetProp, err := expandComputeGlobalForwardingRuleTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(targetProp)) && (ok || !reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp + + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + Target: dcl.String(d.Get("target").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), + Network: dcl.StringOrNil(d.Get("network").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Project: dcl.String(project), } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules") + id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") if err != nil { - return err + return fmt.Errorf("error constructing id: %s", err) } - - log.Printf("[DEBUG] Creating new GlobalForwardingRule: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { - return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + return err } - billingProject = project - + billingProject := project // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("Error creating GlobalForwardingRule: %s", 
err) - } - - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") - if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating GlobalForwardingRule", userAgent, - d.Timeout(schema.TimeoutCreate)) + res, err := client.ApplyForwardingRule(context.Background(), obj, createDirective...) - if err != nil { + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { // The resource didn't actually create d.SetId("") - return fmt.Errorf("Error waiting to create GlobalForwardingRule: %s", err) + return fmt.Errorf("Error creating ForwardingRule: %s", err) } - log.Printf("[DEBUG] Finished creating GlobalForwardingRule %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) return resourceComputeGlobalForwardingRuleRead(d, meta) } func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) + project, err := getProject(d, config) if err != nil { return err } - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}") - if err != nil { - return err + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + Target: dcl.String(d.Get("target").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), + Network: dcl.StringOrNil(d.Get("network").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Project: dcl.String(project), } - billingProject := "" - - project, err := getProject(d, config) + userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { - return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + return err } - billingProject = project - + billingProject := project // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetForwardingRule(context.Background(), obj) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("ComputeGlobalForwardingRule %q", d.Id())) + 
resourceName := fmt.Sprintf("ComputeGlobalForwardingRule %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) } - - if err := d.Set("description", flattenComputeGlobalForwardingRuleDescription(res["description"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("target", res.Target); err != nil { + return fmt.Errorf("error setting target in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("ip_address", res.IPAddress); err != nil { + return fmt.Errorf("error setting ip_address in state: %s", err) } - if err := d.Set("ip_address", flattenComputeGlobalForwardingRuleIPAddress(res["IPAddress"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("ip_protocol", res.IPProtocol); err != nil { + return fmt.Errorf("error setting ip_protocol in state: %s", err) } - if err := d.Set("ip_protocol", flattenComputeGlobalForwardingRuleIPProtocol(res["IPProtocol"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("ip_version", res.IPVersion); err != nil { + return fmt.Errorf("error setting ip_version in state: %s", err) } - if err := d.Set("ip_version", flattenComputeGlobalForwardingRuleIpVersion(res["ipVersion"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) } - if err := d.Set("load_balancing_scheme", flattenComputeGlobalForwardingRuleLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("load_balancing_scheme", res.LoadBalancingScheme); err != nil { + return fmt.Errorf("error setting load_balancing_scheme in state: %s", err) } - if err := d.Set("metadata_filters", flattenComputeGlobalForwardingRuleMetadataFilters(res["metadataFilters"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("metadata_filters", flattenComputeGlobalForwardingRuleMetadataFilterArray(res.MetadataFilter)); err != nil { + return fmt.Errorf("error setting metadata_filters in state: %s", err) } - if err := d.Set("name", flattenComputeGlobalForwardingRuleName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("network", res.Network); err != nil { + return fmt.Errorf("error setting network in state: %s", err) } - if err := d.Set("port_range", flattenComputeGlobalForwardingRulePortRange(res["portRange"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("port_range", res.PortRange); err != nil { + return fmt.Errorf("error setting port_range in state: %s", err) } - if err := d.Set("target", flattenComputeGlobalForwardingRuleTarget(res["target"], d, config)); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) 
} - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + if err = d.Set("label_fingerprint", res.LabelFingerprint); err != nil { + return fmt.Errorf("error setting label_fingerprint in state: %s", err) + } + if err = d.Set("self_link", res.SelfLink); err != nil { + return fmt.Errorf("error setting self_link in state: %s", err) } return nil } - func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) + project, err := getProject(d, config) if err != nil { return err } - billingProject := "" - - project, err := getProject(d, config) + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + Target: dcl.String(d.Get("target").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), + Network: dcl.StringOrNil(d.Get("network").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { - return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + return err } - billingProject = project - d.Partial(true) - - if d.HasChange("target") { - obj := make(map[string]interface{}) - - targetProp, err := expandComputeGlobalForwardingRuleTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}/setTarget") - if err != nil { - return err - } - - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("Error updating GlobalForwardingRule %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating GlobalForwardingRule %q: %#v", d.Id(), res) - } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyForwardingRule(context.Background(), obj, directive...) 
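+ // A dcl.DiffAfterApplyError is handled separately below: it appears to mean the apply itself succeeded but a diff remained afterwards, so it is logged rather than treated as a failure.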
- err = computeOperationWaitTime( - config, res, project, "Updating GlobalForwardingRule", userAgent, - d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating ForwardingRule: %s", err) } - d.Partial(false) + log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) return resourceComputeGlobalForwardingRuleRead(d, meta) } func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) + project, err := getProject(d, config) if err != nil { return err } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for GlobalForwardingRule: %s", err) + obj := &compute.ForwardingRule{ + Name: dcl.String(d.Get("name").(string)), + Target: dcl.String(d.Get("target").(string)), + Description: dcl.String(d.Get("description").(string)), + IPAddress: dcl.StringOrNil(d.Get("ip_address").(string)), + IPProtocol: compute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), + IPVersion: compute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), + Labels: checkStringMap(d.Get("labels")), + LoadBalancingScheme: compute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), + MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), + Network: dcl.StringOrNil(d.Get("network").(string)), + PortRange: dcl.String(d.Get("port_range").(string)), + Project: dcl.String(project), } - billingProject = project - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/forwardingRules/{{name}}") + log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { return err } - - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting GlobalForwardingRule %q", d.Id()) - + billingProject := project // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { billingProject = bp } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalForwardingRule") + client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp } - - err = computeOperationWaitTime( - config, res, project, "Deleting GlobalForwardingRule", userAgent, - d.Timeout(schema.TimeoutDelete)) - - if err != nil { - return err + if err := client.DeleteForwardingRule(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - log.Printf("[DEBUG] Finished deleting GlobalForwardingRule %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished deleting ForwardingRule %q", d.Id()) return nil } @@ -528,7 +458,7 @@ func resourceComputeGlobalForwardingRuleImport(d *schema.ResourceData, meta inte } // Replace import id for the resource id - id, err :=
replaceVars(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") + id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") if err != nil { return nil, fmt.Errorf("Error constructing id: %s", err) } @@ -537,186 +467,117 @@ func resourceComputeGlobalForwardingRuleImport(d *schema.ResourceData, meta inte return []*schema.ResourceData{d}, nil } -func flattenComputeGlobalForwardingRuleDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalForwardingRuleIPAddress(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} +func expandComputeGlobalForwardingRuleMetadataFilterArray(o interface{}) []compute.ForwardingRuleMetadataFilter { + if o == nil { + return make([]compute.ForwardingRuleMetadataFilter, 0) + } -func flattenComputeGlobalForwardingRuleIPProtocol(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]compute.ForwardingRuleMetadataFilter, 0) + } -func flattenComputeGlobalForwardingRuleIpVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} + items := make([]compute.ForwardingRuleMetadataFilter, 0, len(objs)) + for _, item := range objs { + i := expandComputeGlobalForwardingRuleMetadataFilter(item) + items = append(items, *i) + } -func flattenComputeGlobalForwardingRuleLoadBalancingScheme(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + return items } -func flattenComputeGlobalForwardingRuleMetadataFilters(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "filter_match_criteria": flattenComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(original["filterMatchCriteria"], d, config), - "filter_labels": flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabels(original["filterLabels"], d, config), - }) +func expandComputeGlobalForwardingRuleMetadataFilter(o interface{}) *compute.ForwardingRuleMetadataFilter { + if o == nil { + return compute.EmptyForwardingRuleMetadataFilter } - return transformed -} -func flattenComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} -func flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(original["name"], d, config), - "value": flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(original["value"], d, config), - }) + obj := o.(map[string]interface{}) + return &compute.ForwardingRuleMetadataFilter{ + FilterLabel: expandComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(obj["filter_labels"]), + 
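+ // filter_match_criteria is held as a plain string in state; the generated EnumRef helper wraps it in the DCL's typed enum pointer (presumably nil for an empty string, matching the DCL's other enum helpers).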
FilterMatchCriteria: compute.ForwardingRuleMetadataFilterFilterMatchCriteriaEnumRef(obj["filter_match_criteria"].(string)), } - return transformed -} -func flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v } -func flattenComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} +func flattenComputeGlobalForwardingRuleMetadataFilterArray(objs []compute.ForwardingRuleMetadataFilter) []interface{} { + if objs == nil { + return nil + } -func flattenComputeGlobalForwardingRuleName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} + items := []interface{}{} + for _, item := range objs { + i := flattenComputeGlobalForwardingRuleMetadataFilter(&item) + items = append(items, i) + } -func flattenComputeGlobalForwardingRulePortRange(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + return items } -func flattenComputeGlobalForwardingRuleTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} +func flattenComputeGlobalForwardingRuleMetadataFilter(obj *compute.ForwardingRuleMetadataFilter) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "filter_labels": flattenComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(obj.FilterLabel), + "filter_match_criteria": obj.FilterMatchCriteria, + } -func expandComputeGlobalForwardingRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} + return transformed -func expandComputeGlobalForwardingRuleIPAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil } +func expandComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(o interface{}) []compute.ForwardingRuleMetadataFilterFilterLabel { + if o == nil { + return make([]compute.ForwardingRuleMetadataFilterFilterLabel, 0) + } -func expandComputeGlobalForwardingRuleIPProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]compute.ForwardingRuleMetadataFilterFilterLabel, 0) + } -func expandComputeGlobalForwardingRuleIpVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} + items := make([]compute.ForwardingRuleMetadataFilterFilterLabel, 0, len(objs)) + for _, item := range objs { + i := expandComputeGlobalForwardingRuleMetadataFilterFilterLabel(item) + items = append(items, *i) + } -func expandComputeGlobalForwardingRuleLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + return items } -func expandComputeGlobalForwardingRuleMetadataFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilterMatchCriteria, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(original["filter_match_criteria"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !isEmptyValue(val) { - 
transformed["filterMatchCriteria"] = transformedFilterMatchCriteria - } - - transformedFilterLabels, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterLabels(original["filter_labels"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFilterLabels); val.IsValid() && !isEmptyValue(val) { - transformed["filterLabels"] = transformedFilterLabels - } - - req = append(req, transformed) - } - return req, nil -} +func expandComputeGlobalForwardingRuleMetadataFilterFilterLabel(o interface{}) *compute.ForwardingRuleMetadataFilterFilterLabel { + if o == nil { + return compute.EmptyForwardingRuleMetadataFilterFilterLabel + } -func expandComputeGlobalForwardingRuleMetadataFiltersFilterMatchCriteria(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + obj := o.(map[string]interface{}) + return &compute.ForwardingRuleMetadataFilterFilterLabel{ + Name: dcl.String(obj["name"].(string)), + Value: dcl.String(obj["value"].(string)), + } } -func expandComputeGlobalForwardingRuleMetadataFiltersFilterLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} +func flattenComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(objs []compute.ForwardingRuleMetadataFilterFilterLabel) []interface{} { + if objs == nil { + return nil + } -func expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} + items := []interface{}{} + for _, item := range objs { + i := flattenComputeGlobalForwardingRuleMetadataFilterFilterLabel(&item) + items = append(items, i) + } -func expandComputeGlobalForwardingRuleMetadataFiltersFilterLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + return items } -func expandComputeGlobalForwardingRuleName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} +func flattenComputeGlobalForwardingRuleMetadataFilterFilterLabel(obj *compute.ForwardingRuleMetadataFilterFilterLabel) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "name": obj.Name, + "value": obj.Value, + } -func expandComputeGlobalForwardingRulePortRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} + return transformed -func expandComputeGlobalForwardingRuleTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil } diff --git a/google/resource_compute_global_forwarding_rule_sweeper_test.go 
b/google/resource_compute_global_forwarding_rule_sweeper_test.go index 0f98286a741..9d1471c4b84 100644 --- a/google/resource_compute_global_forwarding_rule_sweeper_test.go +++ b/google/resource_compute_global_forwarding_rule_sweeper_test.go @@ -1,14 +1,15 @@ // ---------------------------------------------------------------------------- // -// *** AUTO GENERATED CODE *** Type: MMv1 *** +// *** AUTO GENERATED CODE *** Type: DCL *** // // ---------------------------------------------------------------------------- // -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. // -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose // // ---------------------------------------------------------------------------- @@ -17,9 +18,9 @@ package google import ( "context" "log" - "strings" "testing" + compute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -30,10 +31,8 @@ func init() { }) } -// At the time of writing, the CI only passes us-central1 as the region func testSweepComputeGlobalForwardingRule(region string) error { - resourceName := "ComputeGlobalForwardingRule" - log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ComputeGlobalForwardingRule") config, err := sharedConfigForRegion(region) if err != nil { @@ -50,75 +49,23 @@ func testSweepComputeGlobalForwardingRule(region string) error { t := &testing.T{} billingId := getTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, + // Setup variables to be used for Delete arguments. 
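+ // Only the "project" and "location" entries below feed DeleteAllForwardingRule; isDeletableComputeGlobalForwardingRule then narrows the candidates to sweepable test resources.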
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, } - listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/global/forwardingRules", "?")[0] - listUrl, err := replaceVars(d, config, listTemplate) + client := NewDCLComputeClient(config, config.userAgent, "", 0) + err = client.DeleteAllForwardingRule(context.Background(), d["project"], d["location"], isDeletableComputeGlobalForwardingRule) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["items"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil - } - - name := GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !isSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/global/forwardingRules/{{name}}" - deleteUrl, err := replaceVars(d, config, deleteTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil - } - deleteUrl = deleteUrl + name - - // Don't wait on operations as we may have a lot to delete - _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) - } - } - - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + return err } - return nil } + +func isDeletableComputeGlobalForwardingRule(r *compute.ForwardingRule) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_container_aws_cluster.go b/google/resource_container_aws_cluster.go new file mode 100644 index 00000000000..979175e9972 --- /dev/null +++ b/google/resource_container_aws_cluster.go @@ -0,0 +1,1220 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" +) + +func resourceContainerAwsCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAwsClusterCreate, + Read: resourceContainerAwsClusterRead, + Update: resourceContainerAwsClusterUpdate, + Delete: resourceContainerAwsClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAwsClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "authorization": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Configuration related to the cluster RBAC settings.", + MaxItems: 1, + Elem: ContainerAwsClusterAuthorizationSchema(), + }, + + "aws_region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call GetAwsServerConfig to list all supported AWS regions within a given Google Cloud region.", + }, + + "control_plane": { + Type: schema.TypeList, + Required: true, + Description: "Required. Configuration related to the cluster control plane.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneSchema(), + }, + + "fleet": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Fleet configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterFleetSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "networking": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Cluster-wide networking configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterNetworkingSchema(), + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human readable description of this cluster.
Cannot be longer than 255 UTF-8 encoded bytes.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was created.", + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The endpoint of the cluster's API server.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the cluster.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was last updated.", + }, + + "workload_identity_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Workload Identity settings.", + Elem: ContainerAwsClusterWorkloadIdentityConfigSchema(), + }, + }, + } +} + +func ContainerAwsClusterAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_users": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. At most one user can be specified. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAwsClusterAuthorizationAdminUsersSchema(), + }, + }, + } +} + +func ContainerAwsClusterAuthorizationAdminUsersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_services_authentication": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Authentication configuration for management of AWS resources.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneAwsServicesAuthenticationSchema(), + }, + + "config_encryption": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The ARN of the AWS KMS key used to encrypt cluster configuration.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneConfigEncryptionSchema(), + }, + + "database_encryption": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
The ARN of the AWS KMS key used to encrypt cluster secrets.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneDatabaseEncryptionSchema(), + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The name of the AWS IAM instance profile to assign to each control plane replica.", + }, + + "subnet_ids": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "Required. The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.", + }, + + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The AWS instance type. When unspecified, it defaults to `t3.medium`.", + }, + + "main_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneMainVolumeSchema(), + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneProxyConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneRootVolumeSchema(), + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. SSH configuration for how to access the underlying control plane machines.", + MaxItems: 1, + Elem: ContainerAwsClusterControlPlaneSshConfigSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters.
Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ContainerAwsClusterControlPlaneAwsServicesAuthenticationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account.", + }, + + "role_session_name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. An identifier for the assumed role session. When unspecified, it defaults to `multicloud-service-agent`.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneConfigEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The ARN of the AWS KMS key used to encrypt cluster configuration.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneDatabaseEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The ARN of the AWS KMS key used to encrypt cluster secrets.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneMainVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARN of the AWS Secrets Manager secret that contains the HTTP(S) proxy configuration.", + }, + + "secret_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The version string of the AWS Secrets Manager secret that contains the HTTP(S) proxy configuration.", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional.
The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsClusterControlPlaneSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_key_pair": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The name of the EC2 key pair used to log in to cluster machines.", + }, + }, + } +} + +func ContainerAwsClusterFleetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The number of the Fleet host project where this cluster will be registered.", + }, + + "membership": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects/<project-number>/locations/global/membership/<cluster-id>.", + }, + }, + } +} + +func ContainerAwsClusterNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC.
This field cannot be changed after creation.", + }, + }, + } +} + +func ContainerAwsClusterWorkloadIdentityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + }, + + "issuer_uri": { + Type: schema.TypeString, + Computed: true, + Description: "The OIDC issuer URL for this cluster.", + }, + + "workload_pool": { + Type: schema.TypeString, + Computed: true, + Description: "The Workload Identity Pool associated to the cluster.", + }, + }, + } +} + +func resourceContainerAwsClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, createDirective...) 
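+ // A dcl.DiffAfterApplyError indicates the apply call went through but the DCL still detected a diff between the applied and desired state; it is logged below and the create is otherwise treated as successful.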
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAwsClusterRead(d, meta) +} + +func resourceContainerAwsClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCluster(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAwsCluster %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("authorization", flattenContainerAwsClusterAuthorization(res.Authorization)); err != nil { + return fmt.Errorf("error setting authorization in state: %s", err) + } + if err = d.Set("aws_region", res.AwsRegion); err != nil { + return fmt.Errorf("error setting aws_region in state: %s", err) + } + if err = d.Set("control_plane", flattenContainerAwsClusterControlPlane(res.ControlPlane)); err != nil { + return fmt.Errorf("error setting control_plane in state: %s", err) + } + if err = d.Set("fleet", flattenContainerAwsClusterFleet(res.Fleet)); err != nil { + return fmt.Errorf("error setting fleet in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("networking", flattenContainerAwsClusterNetworking(res.Networking)); err != nil { + return fmt.Errorf("error setting networking in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in 
state: %s", err) + } + if err = d.Set("endpoint", res.Endpoint); err != nil { + return fmt.Errorf("error setting endpoint in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + if err = d.Set("workload_identity_config", flattenContainerAwsClusterWorkloadIdentityConfig(res.WorkloadIdentityConfig)); err != nil { + return fmt.Errorf("error setting workload_identity_config in state: %s", err) + } + + return nil +} +func resourceContainerAwsClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
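+ // As in Create, a dcl.DiffAfterApplyError (a diff remaining after the apply) is logged below and the update is otherwise treated as successful.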
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAwsClusterRead(d, meta) +} + +func resourceContainerAwsClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.Cluster{ + Authorization: expandContainerAwsClusterAuthorization(d.Get("authorization")), + AwsRegion: dcl.String(d.Get("aws_region").(string)), + ControlPlane: expandContainerAwsClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAwsClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAwsClusterNetworking(d.Get("networking")), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCluster(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Cluster %q", d.Id()) + return nil +} + +func resourceContainerAwsClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/awsClusters/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAwsClusterAuthorization(o interface{}) *containeraws.ClusterAuthorization { + if o == nil { + return containeraws.EmptyClusterAuthorization + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterAuthorization + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterAuthorization{ + AdminUsers: expandContainerAwsClusterAuthorizationAdminUsersArray(obj["admin_users"]), + } +} + +func flattenContainerAwsClusterAuthorization(obj *containeraws.ClusterAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "admin_users": flattenContainerAwsClusterAuthorizationAdminUsersArray(obj.AdminUsers), + } + + return []interface{}{transformed} + +} +func 
expandContainerAwsClusterAuthorizationAdminUsersArray(o interface{}) []containeraws.ClusterAuthorizationAdminUsers { + if o == nil { + return make([]containeraws.ClusterAuthorizationAdminUsers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]containeraws.ClusterAuthorizationAdminUsers, 0) + } + + items := make([]containeraws.ClusterAuthorizationAdminUsers, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsClusterAuthorizationAdminUsers(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsClusterAuthorizationAdminUsers(o interface{}) *containeraws.ClusterAuthorizationAdminUsers { + if o == nil { + return containeraws.EmptyClusterAuthorizationAdminUsers + } + + obj := o.(map[string]interface{}) + return &containeraws.ClusterAuthorizationAdminUsers{ + Username: dcl.String(obj["username"].(string)), + } +} + +func flattenContainerAwsClusterAuthorizationAdminUsersArray(objs []containeraws.ClusterAuthorizationAdminUsers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsClusterAuthorizationAdminUsers(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsClusterAuthorizationAdminUsers(obj *containeraws.ClusterAuthorizationAdminUsers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "username": obj.Username, + } + + return transformed + +} + +func expandContainerAwsClusterControlPlane(o interface{}) *containeraws.ClusterControlPlane { + if o == nil { + return containeraws.EmptyClusterControlPlane + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterControlPlane + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlane{ + AwsServicesAuthentication: expandContainerAwsClusterControlPlaneAwsServicesAuthentication(obj["aws_services_authentication"]), + ConfigEncryption: expandContainerAwsClusterControlPlaneConfigEncryption(obj["config_encryption"]), + DatabaseEncryption: expandContainerAwsClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), + IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), + SubnetIds: expandStringArray(obj["subnet_ids"]), + Version: dcl.String(obj["version"].(string)), + InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), + MainVolume: expandContainerAwsClusterControlPlaneMainVolume(obj["main_volume"]), + ProxyConfig: expandContainerAwsClusterControlPlaneProxyConfig(obj["proxy_config"]), + RootVolume: expandContainerAwsClusterControlPlaneRootVolume(obj["root_volume"]), + SecurityGroupIds: expandStringArray(obj["security_group_ids"]), + SshConfig: expandContainerAwsClusterControlPlaneSshConfig(obj["ssh_config"]), + Tags: checkStringMap(obj["tags"]), + } +} + +func flattenContainerAwsClusterControlPlane(obj *containeraws.ClusterControlPlane) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "aws_services_authentication": flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj.AwsServicesAuthentication), + "config_encryption": flattenContainerAwsClusterControlPlaneConfigEncryption(obj.ConfigEncryption), + "database_encryption": flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj.DatabaseEncryption), + "iam_instance_profile": obj.IamInstanceProfile, + "subnet_ids": obj.SubnetIds, + "version": obj.Version, + "instance_type": obj.InstanceType, + 
"main_volume": flattenContainerAwsClusterControlPlaneMainVolume(obj.MainVolume), + "proxy_config": flattenContainerAwsClusterControlPlaneProxyConfig(obj.ProxyConfig), + "root_volume": flattenContainerAwsClusterControlPlaneRootVolume(obj.RootVolume), + "security_group_ids": obj.SecurityGroupIds, + "ssh_config": flattenContainerAwsClusterControlPlaneSshConfig(obj.SshConfig), + "tags": obj.Tags, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneAwsServicesAuthentication(o interface{}) *containeraws.ClusterControlPlaneAwsServicesAuthentication { + if o == nil { + return containeraws.EmptyClusterControlPlaneAwsServicesAuthentication + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterControlPlaneAwsServicesAuthentication + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneAwsServicesAuthentication{ + RoleArn: dcl.String(obj["role_arn"].(string)), + RoleSessionName: dcl.StringOrNil(obj["role_session_name"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneAwsServicesAuthentication(obj *containeraws.ClusterControlPlaneAwsServicesAuthentication) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "role_arn": obj.RoleArn, + "role_session_name": obj.RoleSessionName, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneConfigEncryption(o interface{}) *containeraws.ClusterControlPlaneConfigEncryption { + if o == nil { + return containeraws.EmptyClusterControlPlaneConfigEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterControlPlaneConfigEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneConfigEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneConfigEncryption(obj *containeraws.ClusterControlPlaneConfigEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneDatabaseEncryption(o interface{}) *containeraws.ClusterControlPlaneDatabaseEncryption { + if o == nil { + return containeraws.EmptyClusterControlPlaneDatabaseEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterControlPlaneDatabaseEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneDatabaseEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneDatabaseEncryption(obj *containeraws.ClusterControlPlaneDatabaseEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneMainVolume(o interface{}) *containeraws.ClusterControlPlaneMainVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneMainVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + VolumeType: 
containeraws.ClusterControlPlaneMainVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneMainVolume(obj *containeraws.ClusterControlPlaneMainVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneProxyConfig(o interface{}) *containeraws.ClusterControlPlaneProxyConfig { + if o == nil { + return containeraws.EmptyClusterControlPlaneProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterControlPlaneProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneProxyConfig{ + SecretArn: dcl.String(obj["secret_arn"].(string)), + SecretVersion: dcl.String(obj["secret_version"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneProxyConfig(obj *containeraws.ClusterControlPlaneProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "secret_arn": obj.SecretArn, + "secret_version": obj.SecretVersion, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneRootVolume(o interface{}) *containeraws.ClusterControlPlaneRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneRootVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + VolumeType: containeraws.ClusterControlPlaneRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneRootVolume(obj *containeraws.ClusterControlPlaneRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterControlPlaneSshConfig(o interface{}) *containeraws.ClusterControlPlaneSshConfig { + if o == nil { + return containeraws.EmptyClusterControlPlaneSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterControlPlaneSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterControlPlaneSshConfig{ + Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), + } +} + +func flattenContainerAwsClusterControlPlaneSshConfig(obj *containeraws.ClusterControlPlaneSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ec2_key_pair": obj.Ec2KeyPair, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterFleet(o interface{}) *containeraws.ClusterFleet { + if o == nil { + return containeraws.EmptyClusterFleet + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterFleet + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterFleet{ + Project: dcl.StringOrNil(obj["project"].(string)), + } +} + +func flattenContainerAwsClusterFleet(obj *containeraws.ClusterFleet) interface{} { + if obj == nil || obj.Empty() { + 
return nil + } + transformed := map[string]interface{}{ + "project": obj.Project, + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsClusterNetworking(o interface{}) *containeraws.ClusterNetworking { + if o == nil { + return containeraws.EmptyClusterNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyClusterNetworking + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.ClusterNetworking{ + PodAddressCidrBlocks: expandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: expandStringArray(obj["service_address_cidr_blocks"]), + VPCId: dcl.String(obj["vpc_id"].(string)), + } +} + +func flattenContainerAwsClusterNetworking(obj *containeraws.ClusterNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "vpc_id": obj.VPCId, + } + + return []interface{}{transformed} + +} + +func flattenContainerAwsClusterWorkloadIdentityConfig(obj *containeraws.ClusterWorkloadIdentityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "identity_provider": obj.IdentityProvider, + "issuer_uri": obj.IssuerUri, + "workload_pool": obj.WorkloadPool, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_container_aws_cluster_generated_test.go b/google/resource_container_aws_cluster_generated_test.go new file mode 100644 index 00000000000..6f115b7397c --- /dev/null +++ b/google/resource_container_aws_cluster_generated_test.go @@ -0,0 +1,294 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccContainerAwsCluster_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": getTestProjectFromEnv(), + "project_number": getTestProjectNumberFromEnv(), + "service_acct": getTestServiceAccountFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerAwsClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsCluster_BasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project"}, + }, + { + Config: testAccContainerAwsCluster_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project"}, + }, + }, + }) +} + +func testAccContainerAwsCluster_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = 
"%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsCluster_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-two = "value-two" + } + + description = "An updated sample aws cluster" + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckContainerAwsClusterDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_aws_cluster" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containeraws.Cluster{ + AwsRegion: dcl.String(rs.Primary.Attributes["aws_region"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Endpoint: dcl.StringOrNil(rs.Primary.Attributes["endpoint"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: 
containeraws.ClusterStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLContainerAwsClient(config, config.userAgent, billingProject, 0) + _, err := client.GetCluster(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_aws_cluster still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_container_aws_cluster_sweeper_test.go b/google/resource_container_aws_cluster_sweeper_test.go new file mode 100644 index 00000000000..20ab2282d2d --- /dev/null +++ b/google/resource_container_aws_cluster_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("ContainerAwsCluster", &resource.Sweeper{ + Name: "ContainerAwsCluster", + F: testSweepContainerAwsCluster, + }) +} + +func testSweepContainerAwsCluster(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAwsCluster") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. 
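+ // Note: of these values, only "project" and "location" are passed to the DCL bulk-delete call (DeleteAllCluster) below; the remaining entries are not used by this sweeper.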
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLContainerAwsClient(config, config.userAgent, "", 0) + err = client.DeleteAllCluster(context.Background(), d["project"], d["location"], isDeletableContainerAwsCluster) + if err != nil { + return err + } + return nil +} + +func isDeletableContainerAwsCluster(r *containeraws.Cluster) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_container_aws_node_pool.go b/google/resource_container_aws_node_pool.go new file mode 100644 index 00000000000..88346060a28 --- /dev/null +++ b/google/resource_container_aws_node_pool.go @@ -0,0 +1,874 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" +) + +func resourceContainerAwsNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAwsNodePoolCreate, + Read: resourceContainerAwsNodePoolRead, + Update: resourceContainerAwsNodePoolUpdate, + Delete: resourceContainerAwsNodePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAwsNodePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Autoscaler configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolAutoscalingSchema(), + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The awsCluster for the resource", + }, + + "config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The configuration of the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "max_pods_constraint": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + MaxItems: 1, + Elem: ContainerAwsNodePoolMaxPodsConstraintSchema(), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The subnet where the node pool nodes run.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the node pool.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The lifecycle state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the node pool.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was last updated.", + }, + }, + } +} + +func ContainerAwsNodePoolAutoscalingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Required. Maximum number of nodes in the NodePool. Must be >= min_node_count.", + }, + + "min_node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Required. Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config_encryption": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required.
The ARN of the AWS KMS key used to encrypt node pool configuration.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigConfigEncryptionSchema(), + }, + + "iam_instance_profile": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The name of the AWS IAM role assigned to nodes in the pool.", + }, + + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The AWS instance type. When unspecified, it defaults to `t3.medium`.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The initial labels assigned to nodes of this node pool. An object containing a list of \"key\": value pairs. Example: { \"name\": \"wrench\", \"mass\": \"1.3kg\", \"count\": \"3\" }.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigRootVolumeSchema(), + }, + + "security_group_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The SSH configuration.", + MaxItems: 1, + Elem: ContainerAwsNodePoolConfigSshConfigSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "taints": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The initial taints assigned to nodes of this node pool.", + Elem: ContainerAwsNodePoolConfigTaintsSchema(), + }, + }, + } +} + +func ContainerAwsNodePoolConfigConfigEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The ARN of the AWS KMS key used to encrypt node pool configuration.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume.", + }, + + "kms_key_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. 
If not specified, the default Amazon managed key associated with the AWS region where this cluster runs will be used.", + }, + + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + + "volume_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the EBS volume. When unspecified, it defaults to a GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3", + }, + }, + } +} + +func ContainerAwsNodePoolConfigSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ec2_key_pair": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The name of the EC2 key pair used to log in to cluster machines.", + }, + }, + } +} + +func ContainerAwsNodePoolConfigTaintsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The taint effect. Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE", + }, + + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Key for the taint.", + }, + + "value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Value for the taint.", + }, + }, + } +} + +func ContainerAwsNodePoolMaxPodsConstraintSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_pods_per_node": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Required.
The maximum number of pods to schedule on a single node.", + }, + }, + } +} + +func resourceContainerAwsNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, createDirective...) 
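+ // A DiffAfterApplyError means the apply call itself succeeded but the DCL still observed a diff; it is logged below and tolerated rather than treated as a failed create.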
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAwsNodePoolRead(d, meta) +} + +func resourceContainerAwsNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetNodePool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAwsNodePool %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("autoscaling", flattenContainerAwsNodePoolAutoscaling(res.Autoscaling)); err != nil { + return fmt.Errorf("error setting autoscaling in state: %s", err) + } + if err = d.Set("cluster", res.Cluster); err != nil { + return fmt.Errorf("error setting cluster in state: %s", err) + } + if err = d.Set("config", flattenContainerAwsNodePoolConfig(res.Config)); err != nil { + return fmt.Errorf("error setting config in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("max_pods_constraint", flattenContainerAwsNodePoolMaxPodsConstraint(res.MaxPodsConstraint)); err != nil { + return fmt.Errorf("error setting max_pods_constraint in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("subnet_id", res.SubnetId); err != nil { + return fmt.Errorf("error setting subnet_id in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", 
res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceContainerAwsNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
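+ // As in Create, a DiffAfterApplyError from the DCL is logged rather than treated as a failed update.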
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished updating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAwsNodePoolRead(d, meta) +} + +func resourceContainerAwsNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containeraws.NodePool{ + Autoscaling: expandContainerAwsNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAwsNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAwsNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAwsClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteNodePool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting NodePool %q", d.Id()) + return nil +} + +func resourceContainerAwsNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/awsClusters/(?P[^/]+)/awsNodePools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAwsNodePoolAutoscaling(o interface{}) *containeraws.NodePoolAutoscaling { + if o == nil { + return containeraws.EmptyNodePoolAutoscaling + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyNodePoolAutoscaling + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolAutoscaling{ + MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))), + MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))), + } +} + +func flattenContainerAwsNodePoolAutoscaling(obj *containeraws.NodePoolAutoscaling) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_node_count": obj.MaxNodeCount, + "min_node_count": obj.MinNodeCount, + } + +
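+ // Nested objects are returned as single-element lists to match the MaxItems: 1 block schema.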
return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfig(o interface{}) *containeraws.NodePoolConfig { + if o == nil { + return containeraws.EmptyNodePoolConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyNodePoolConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfig{ + ConfigEncryption: expandContainerAwsNodePoolConfigConfigEncryption(obj["config_encryption"]), + IamInstanceProfile: dcl.String(obj["iam_instance_profile"].(string)), + InstanceType: dcl.StringOrNil(obj["instance_type"].(string)), + Labels: checkStringMap(obj["labels"]), + RootVolume: expandContainerAwsNodePoolConfigRootVolume(obj["root_volume"]), + SecurityGroupIds: expandStringArray(obj["security_group_ids"]), + SshConfig: expandContainerAwsNodePoolConfigSshConfig(obj["ssh_config"]), + Tags: checkStringMap(obj["tags"]), + Taints: expandContainerAwsNodePoolConfigTaintsArray(obj["taints"]), + } +} + +func flattenContainerAwsNodePoolConfig(obj *containeraws.NodePoolConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "config_encryption": flattenContainerAwsNodePoolConfigConfigEncryption(obj.ConfigEncryption), + "iam_instance_profile": obj.IamInstanceProfile, + "instance_type": obj.InstanceType, + "labels": obj.Labels, + "root_volume": flattenContainerAwsNodePoolConfigRootVolume(obj.RootVolume), + "security_group_ids": obj.SecurityGroupIds, + "ssh_config": flattenContainerAwsNodePoolConfigSshConfig(obj.SshConfig), + "tags": obj.Tags, + "taints": flattenContainerAwsNodePoolConfigTaintsArray(obj.Taints), + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigConfigEncryption(o interface{}) *containeraws.NodePoolConfigConfigEncryption { + if o == nil { + return containeraws.EmptyNodePoolConfigConfigEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyNodePoolConfigConfigEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigConfigEncryption{ + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigConfigEncryption(obj *containeraws.NodePoolConfigConfigEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kms_key_arn": obj.KmsKeyArn, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigRootVolume(o interface{}) *containeraws.NodePoolConfigRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigRootVolume{ + Iops: dcl.Int64OrNil(int64(obj["iops"].(int))), + KmsKeyArn: dcl.String(obj["kms_key_arn"].(string)), + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + VolumeType: containeraws.NodePoolConfigRootVolumeVolumeTypeEnumRef(obj["volume_type"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigRootVolume(obj *containeraws.NodePoolConfigRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "iops": obj.Iops, + "kms_key_arn": obj.KmsKeyArn, + "size_gib": obj.SizeGib, + "volume_type": obj.VolumeType, + } + + return []interface{}{transformed} + +} + +func expandContainerAwsNodePoolConfigSshConfig(o interface{}) *containeraws.NodePoolConfigSshConfig { + if o == nil { + return 
containeraws.EmptyNodePoolConfigSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyNodePoolConfigSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolConfigSshConfig{ + Ec2KeyPair: dcl.String(obj["ec2_key_pair"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigSshConfig(obj *containeraws.NodePoolConfigSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ec2_key_pair": obj.Ec2KeyPair, + } + + return []interface{}{transformed} + +} +func expandContainerAwsNodePoolConfigTaintsArray(o interface{}) []containeraws.NodePoolConfigTaints { + if o == nil { + return make([]containeraws.NodePoolConfigTaints, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]containeraws.NodePoolConfigTaints, 0) + } + + items := make([]containeraws.NodePoolConfigTaints, 0, len(objs)) + for _, item := range objs { + i := expandContainerAwsNodePoolConfigTaints(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAwsNodePoolConfigTaints(o interface{}) *containeraws.NodePoolConfigTaints { + if o == nil { + return containeraws.EmptyNodePoolConfigTaints + } + + obj := o.(map[string]interface{}) + return &containeraws.NodePoolConfigTaints{ + Effect: containeraws.NodePoolConfigTaintsEffectEnumRef(obj["effect"].(string)), + Key: dcl.String(obj["key"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenContainerAwsNodePoolConfigTaintsArray(objs []containeraws.NodePoolConfigTaints) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAwsNodePoolConfigTaints(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAwsNodePoolConfigTaints(obj *containeraws.NodePoolConfigTaints) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "effect": obj.Effect, + "key": obj.Key, + "value": obj.Value, + } + + return transformed + +} + +func expandContainerAwsNodePoolMaxPodsConstraint(o interface{}) *containeraws.NodePoolMaxPodsConstraint { + if o == nil { + return containeraws.EmptyNodePoolMaxPodsConstraint + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containeraws.EmptyNodePoolMaxPodsConstraint + } + obj := objArr[0].(map[string]interface{}) + return &containeraws.NodePoolMaxPodsConstraint{ + MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), + } +} + +func flattenContainerAwsNodePoolMaxPodsConstraint(obj *containeraws.NodePoolMaxPodsConstraint) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_pods_per_node": obj.MaxPodsPerNode, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_container_aws_node_pool_generated_test.go b/google/resource_container_aws_node_pool_generated_test.go new file mode 100644 index 00000000000..b5638715ceb --- /dev/null +++ b/google/resource_container_aws_node_pool_generated_test.go @@ -0,0 +1,417 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL 
(https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccContainerAwsNodePool_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "aws_acct_id": "111111111111", + "aws_db_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_region": "us-west-2", + "aws_sg": "sg-0b3f63cb91b247628", + "aws_subnet": "subnet-0b3f63cb91b247628", + "aws_vol_key": "00000000-0000-0000-0000-17aad2f0f61f", + "aws_vpc": "vpc-0b3f63cb91b247628", + "byo_prefix": "mmv2", + "project_name": getTestProjectFromEnv(), + "project_number": getTestProjectNumberFromEnv(), + "service_acct": getTestServiceAccountFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerAwsNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAwsNodePool_BasicHandWritten(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project"}, + }, + { + Config: testAccContainerAwsNodePool_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_aws_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project"}, + }, + }, + }) +} + +func testAccContainerAwsNodePool_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + 
kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAwsNodePool_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +data "google_container_aws_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "%{service_acct}" + } + } + + aws_region = "%{aws_region}" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::%{aws_acct_id}:role/%{byo_prefix}-1p-dev-oneplatform" + role_session_name = "%{byo_prefix}-1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-controlplane" + subnet_ids = ["%{aws_subnet}"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + owner = "%{service_acct}" + } + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = 
"tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "%{aws_vpc}" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "%{project_name}" +} + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_db_key}" + } + + iam_instance_profile = "%{byo_prefix}-1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:%{aws_region}:%{aws_acct_id}:key/%{aws_vol_key}" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["%{aws_sg}"] + + ssh_config { + ec2_key_pair = "%{byo_prefix}-1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "%{aws_subnet}" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-two = "value-two" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckContainerAwsNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_aws_node_pool" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containeraws.NodePool{ + Cluster: dcl.String(rs.Primary.Attributes["cluster"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + SubnetId: dcl.String(rs.Primary.Attributes["subnet_id"]), + Version: dcl.String(rs.Primary.Attributes["version"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containeraws.NodePoolStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLContainerAwsClient(config, config.userAgent, billingProject, 0) + _, err := client.GetNodePool(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_aws_node_pool still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_container_azure_client.go b/google/resource_container_azure_client.go new file mode 100644 index 00000000000..97cae7b84f9 --- /dev/null +++ b/google/resource_container_azure_client.go @@ -0,0 +1,279 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL 
(https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +) + +func resourceContainerAzureClient() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureClientCreate, + Read: resourceContainerAzureClientRead, + Delete: resourceContainerAzureClientDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureClientImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "application_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The Azure Active Directory Application ID.", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "tenant_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The Azure Active Directory Tenant ID.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "certificate": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The PEM encoded x509 certificate.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this resource was created.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
A globally unique identifier for the client.", + }, + }, + } +} + +func resourceContainerAzureClientCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyClient(context.Background(), obj, createDirective...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Client: %s", err) + } + + log.Printf("[DEBUG] Finished creating Client %q: %#v", d.Id(), res) + + return resourceContainerAzureClientRead(d, meta) +} + +func resourceContainerAzureClientRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetClient(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureClient %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("application_id", res.ApplicationId); err != nil { + return fmt.Errorf("error setting application_id in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("tenant_id", 
res.TenantId); err != nil { + return fmt.Errorf("error setting tenant_id in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("certificate", res.Certificate); err != nil { + return fmt.Errorf("error setting certificate in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + + return nil +} + +func resourceContainerAzureClientDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.AzureClient{ + ApplicationId: dcl.String(d.Get("application_id").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + TenantId: dcl.String(d.Get("tenant_id").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Client %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteClient(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Client: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Client %q", d.Id()) + return nil +} + +func resourceContainerAzureClientImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/azureClients/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClients/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_container_azure_client_generated_test.go b/google/resource_container_azure_client_generated_test.go new file mode 100644 index 00000000000..26a3b5b449e --- /dev/null +++ b/google/resource_container_azure_client_generated_test.go @@ -0,0 +1,105 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccContainerAzureClient_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerAzureClientDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureClient_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_client.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerAzureClient_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_azure_client" "primary" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +`, context) +} + +func testAccCheckContainerAzureClientDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_container_azure_client" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.AzureClient{ + ApplicationId: dcl.String(rs.Primary.Attributes["application_id"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + TenantId: dcl.String(rs.Primary.Attributes["tenant_id"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + Certificate: dcl.StringOrNil(rs.Primary.Attributes["certificate"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + } + + client := NewDCLContainerAzureClient(config, config.userAgent, billingProject, 0) + _, err := client.GetClient(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_client still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_container_azure_client_sweeper_test.go b/google/resource_container_azure_client_sweeper_test.go new file mode 100644 index 00000000000..6b47be7151d --- /dev/null +++ b/google/resource_container_azure_client_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is
based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("ContainerAzureClient", &resource.Sweeper{ + Name: "ContainerAzureClient", + F: testSweepContainerAzureClient, + }) +} + +func testSweepContainerAzureClient(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAzureClient") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLContainerAzureClient(config, config.userAgent, "", 0) + err = client.DeleteAllClient(context.Background(), d["project"], d["location"], isDeletableContainerAzureClient) + if err != nil { + return err + } + return nil +} + +func isDeletableContainerAzureClient(r *containerazure.AzureClient) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_container_azure_cluster.go b/google/resource_container_azure_cluster.go new file mode 100644 index 00000000000..2153cc87834 --- /dev/null +++ b/google/resource_container_azure_cluster.go @@ -0,0 +1,1148 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +) + +func resourceContainerAzureCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureClusterCreate, + Read: resourceContainerAzureClusterRead, + Update: resourceContainerAzureClusterUpdate, + Delete: resourceContainerAzureClusterDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureClusterImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "authorization": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Configuration related to the cluster RBAC settings.", + MaxItems: 1, + Elem: ContainerAzureClusterAuthorizationSchema(), + }, + + "azure_region": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call GetAzureServerConfig to list all supported Azure regions within a given Google Cloud region.", + }, + + "client": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Required. Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. See Resource Names (https://cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names.", + }, + + "control_plane": { + Type: schema.TypeList, + Required: true, + Description: "Required. Configuration related to the cluster control plane.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneSchema(), + }, + + "fleet": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Fleet configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterFleetSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "networking": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Cluster-wide networking configuration.", + MaxItems: 1, + Elem: ContainerAzureClusterNetworkingSchema(), + }, + + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The ARM ID of the resource group where the cluster resources are deployed. For example: `/subscriptions/*/resourceGroups/*`", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k.
Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was created.", + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The endpoint of the cluster's API server.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently changes in flight to the cluster.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the cluster.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this cluster was last updated.", + }, + + "workload_identity_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. Workload Identity settings.", + Elem: ContainerAzureClusterWorkloadIdentityConfigSchema(), + }, + }, + } +} + +func ContainerAzureClusterAuthorizationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_users": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. At most one user can be specified. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles", + Elem: ContainerAzureClusterAuthorizationAdminUsersSchema(), + }, + }, + } +} + +func ContainerAzureClusterAuthorizationAdminUsersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "username": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The name of the user, e.g. `my-gcp-id@gmail.com`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
SSH configuration for how to access the underlying control plane machines.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneSshConfigSchema(), + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "Required. The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig.", + }, + + "database_encryption": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to application-layer secrets encryption.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneDatabaseEncryptionSchema(), + }, + + "main_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to an 8-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneMainVolumeSchema(), + }, + + "proxy_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Proxy configuration for outbound HTTP(S) traffic.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneProxyConfigSchema(), + }, + + "replica_placements": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible.", + Elem: ContainerAzureClusterControlPlaneReplicaPlacementsSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to a 32-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureClusterControlPlaneRootVolumeSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of tags to apply to all underlying control plane Azure resources.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The SSH public key data for VMs managed by Anthos. 
This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneDatabaseEncryptionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence a specific version is not supported.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneMainVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneProxyConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ARM ID of the resource group containing the proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`", + }, + + "secret_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The URL of the proxy setting secret with its version. Secret ids are formatted as `https://.vault.azure.net/secrets//`.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneReplicaPlacementsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "azure_availability_zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "For a given replica, the Azure availability zone in which to provision the control plane VM and the ETCD disk.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration.", + }, + }, + } +} + +func ContainerAzureClusterControlPlaneRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureClusterFleetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The number of the Fleet host project where this cluster will be registered.", + }, + + "membership": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the managed Hub Membership resource associated to this cluster. 
Membership names are formatted as projects//locations/global/membership/.", + }, + }, + } +} + +func ContainerAzureClusterNetworkingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_address_cidr_blocks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "virtual_network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation.", + }, + }, + } +} + +func ContainerAzureClusterWorkloadIdentityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.", + }, + + "issuer_uri": { + Type: schema.TypeString, + Computed: true, + Description: "The OIDC issuer URL for this cluster.", + }, + + "workload_pool": { + Type: schema.TypeString, + Computed: true, + Description: "The Workload Identity Pool associated to the cluster.", + }, + }, + } +} + +func resourceContainerAzureClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + Client: dcl.String(d.Get("client").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := 
getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, createDirective...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAzureClusterRead(d, meta) +} + +func resourceContainerAzureClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + Client: dcl.String(d.Get("client").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCluster(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureCluster %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("authorization", flattenContainerAzureClusterAuthorization(res.Authorization)); err != nil { + return fmt.Errorf("error setting authorization in state: %s", err) + } + if err = d.Set("azure_region", res.AzureRegion); err != nil { + return fmt.Errorf("error setting azure_region in state: %s", err) + } + if err = d.Set("client", res.Client); err != nil { + return fmt.Errorf("error setting client in state: %s", err) + } + if err = d.Set("control_plane", flattenContainerAzureClusterControlPlane(res.ControlPlane)); err != nil { + return fmt.Errorf("error setting control_plane in state: %s", err) + } + if err = d.Set("fleet", flattenContainerAzureClusterFleet(res.Fleet)); err != nil { + return fmt.Errorf("error setting fleet in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + 
return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("networking", flattenContainerAzureClusterNetworking(res.Networking)); err != nil { + return fmt.Errorf("error setting networking in state: %s", err) + } + if err = d.Set("resource_group_id", res.ResourceGroupId); err != nil { + return fmt.Errorf("error setting resource_group_id in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("endpoint", res.Endpoint); err != nil { + return fmt.Errorf("error setting endpoint in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + if err = d.Set("workload_identity_config", flattenContainerAzureClusterWorkloadIdentityConfig(res.WorkloadIdentityConfig)); err != nil { + return fmt.Errorf("error setting workload_identity_config in state: %s", err) + } + + return nil +} +func resourceContainerAzureClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + Client: dcl.String(d.Get("client").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCluster(context.Background(), obj, directive...) 
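+ // NOTE: ApplyCluster is the same DCL entry point used by create; the UpdateDirective passed above is intended to restrict this apply to in-place modification. + // The error handling below deliberately treats a dcl.DiffAfterApplyError as success: the apply itself went through, but the DCL still observed a residual diff, which is only logged at DEBUG level.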
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res) + + return resourceContainerAzureClusterRead(d, meta) +} + +func resourceContainerAzureClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.Cluster{ + Authorization: expandContainerAzureClusterAuthorization(d.Get("authorization")), + AzureRegion: dcl.String(d.Get("azure_region").(string)), + Client: dcl.String(d.Get("client").(string)), + ControlPlane: expandContainerAzureClusterControlPlane(d.Get("control_plane")), + Fleet: expandContainerAzureClusterFleet(d.Get("fleet")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Networking: expandContainerAzureClusterNetworking(d.Get("networking")), + ResourceGroupId: dcl.String(d.Get("resource_group_id").(string)), + Annotations: checkStringMap(d.Get("annotations")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCluster(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Cluster %q", d.Id()) + return nil +} + +func resourceContainerAzureClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/azureClusters/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAzureClusterAuthorization(o interface{}) *containerazure.ClusterAuthorization { + if o == nil { + return containerazure.EmptyClusterAuthorization + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterAuthorization + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterAuthorization{ + AdminUsers: expandContainerAzureClusterAuthorizationAdminUsersArray(obj["admin_users"]), + } +} + +func flattenContainerAzureClusterAuthorization(obj *containerazure.ClusterAuthorization) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "admin_users": 
flattenContainerAzureClusterAuthorizationAdminUsersArray(obj.AdminUsers), + } + + return []interface{}{transformed} + +} +func expandContainerAzureClusterAuthorizationAdminUsersArray(o interface{}) []containerazure.ClusterAuthorizationAdminUsers { + if o == nil { + return make([]containerazure.ClusterAuthorizationAdminUsers, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]containerazure.ClusterAuthorizationAdminUsers, 0) + } + + items := make([]containerazure.ClusterAuthorizationAdminUsers, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterAuthorizationAdminUsers(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterAuthorizationAdminUsers(o interface{}) *containerazure.ClusterAuthorizationAdminUsers { + if o == nil { + return containerazure.EmptyClusterAuthorizationAdminUsers + } + + obj := o.(map[string]interface{}) + return &containerazure.ClusterAuthorizationAdminUsers{ + Username: dcl.String(obj["username"].(string)), + } +} + +func flattenContainerAzureClusterAuthorizationAdminUsersArray(objs []containerazure.ClusterAuthorizationAdminUsers) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterAuthorizationAdminUsers(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterAuthorizationAdminUsers(obj *containerazure.ClusterAuthorizationAdminUsers) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "username": obj.Username, + } + + return transformed + +} + +func expandContainerAzureClusterControlPlane(o interface{}) *containerazure.ClusterControlPlane { + if o == nil { + return containerazure.EmptyClusterControlPlane + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterControlPlane + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlane{ + SshConfig: expandContainerAzureClusterControlPlaneSshConfig(obj["ssh_config"]), + SubnetId: dcl.String(obj["subnet_id"].(string)), + Version: dcl.String(obj["version"].(string)), + DatabaseEncryption: expandContainerAzureClusterControlPlaneDatabaseEncryption(obj["database_encryption"]), + MainVolume: expandContainerAzureClusterControlPlaneMainVolume(obj["main_volume"]), + ProxyConfig: expandContainerAzureClusterControlPlaneProxyConfig(obj["proxy_config"]), + ReplicaPlacements: expandContainerAzureClusterControlPlaneReplicaPlacementsArray(obj["replica_placements"]), + RootVolume: expandContainerAzureClusterControlPlaneRootVolume(obj["root_volume"]), + Tags: checkStringMap(obj["tags"]), + VmSize: dcl.StringOrNil(obj["vm_size"].(string)), + } +} + +func flattenContainerAzureClusterControlPlane(obj *containerazure.ClusterControlPlane) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ssh_config": flattenContainerAzureClusterControlPlaneSshConfig(obj.SshConfig), + "subnet_id": obj.SubnetId, + "version": obj.Version, + "database_encryption": flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj.DatabaseEncryption), + "main_volume": flattenContainerAzureClusterControlPlaneMainVolume(obj.MainVolume), + "proxy_config": flattenContainerAzureClusterControlPlaneProxyConfig(obj.ProxyConfig), + "replica_placements": flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(obj.ReplicaPlacements), + "root_volume": 
flattenContainerAzureClusterControlPlaneRootVolume(obj.RootVolume), + "tags": obj.Tags, + "vm_size": obj.VmSize, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneSshConfig(o interface{}) *containerazure.ClusterControlPlaneSshConfig { + if o == nil { + return containerazure.EmptyClusterControlPlaneSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterControlPlaneSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneSshConfig{ + AuthorizedKey: dcl.String(obj["authorized_key"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneSshConfig(obj *containerazure.ClusterControlPlaneSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "authorized_key": obj.AuthorizedKey, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneDatabaseEncryption(o interface{}) *containerazure.ClusterControlPlaneDatabaseEncryption { + if o == nil { + return containerazure.EmptyClusterControlPlaneDatabaseEncryption + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterControlPlaneDatabaseEncryption + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneDatabaseEncryption{ + KeyId: dcl.String(obj["key_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneDatabaseEncryption(obj *containerazure.ClusterControlPlaneDatabaseEncryption) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "key_id": obj.KeyId, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneMainVolume(o interface{}) *containerazure.ClusterControlPlaneMainVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneMainVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureClusterControlPlaneMainVolume(obj *containerazure.ClusterControlPlaneMainVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterControlPlaneProxyConfig(o interface{}) *containerazure.ClusterControlPlaneProxyConfig { + if o == nil { + return containerazure.EmptyClusterControlPlaneProxyConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterControlPlaneProxyConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneProxyConfig{ + ResourceGroupId: dcl.String(obj["resource_group_id"].(string)), + SecretId: dcl.String(obj["secret_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneProxyConfig(obj *containerazure.ClusterControlPlaneProxyConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resource_group_id": obj.ResourceGroupId, + "secret_id": obj.SecretId, + } + + return []interface{}{transformed} + +} +func expandContainerAzureClusterControlPlaneReplicaPlacementsArray(o interface{}) []containerazure.ClusterControlPlaneReplicaPlacements { + if o == nil { + return make([]containerazure.ClusterControlPlaneReplicaPlacements, 0) + } + + objs := 
o.([]interface{}) + if len(objs) == 0 { + return make([]containerazure.ClusterControlPlaneReplicaPlacements, 0) + } + + items := make([]containerazure.ClusterControlPlaneReplicaPlacements, 0, len(objs)) + for _, item := range objs { + i := expandContainerAzureClusterControlPlaneReplicaPlacements(item) + items = append(items, *i) + } + + return items +} + +func expandContainerAzureClusterControlPlaneReplicaPlacements(o interface{}) *containerazure.ClusterControlPlaneReplicaPlacements { + if o == nil { + return containerazure.EmptyClusterControlPlaneReplicaPlacements + } + + obj := o.(map[string]interface{}) + return &containerazure.ClusterControlPlaneReplicaPlacements{ + AzureAvailabilityZone: dcl.String(obj["azure_availability_zone"].(string)), + SubnetId: dcl.String(obj["subnet_id"].(string)), + } +} + +func flattenContainerAzureClusterControlPlaneReplicaPlacementsArray(objs []containerazure.ClusterControlPlaneReplicaPlacements) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenContainerAzureClusterControlPlaneReplicaPlacements(&item) + items = append(items, i) + } + + return items +} + +func flattenContainerAzureClusterControlPlaneReplicaPlacements(obj *containerazure.ClusterControlPlaneReplicaPlacements) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "azure_availability_zone": obj.AzureAvailabilityZone, + "subnet_id": obj.SubnetId, + } + + return transformed + +} + +func expandContainerAzureClusterControlPlaneRootVolume(o interface{}) *containerazure.ClusterControlPlaneRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterControlPlaneRootVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureClusterControlPlaneRootVolume(obj *containerazure.ClusterControlPlaneRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterFleet(o interface{}) *containerazure.ClusterFleet { + if o == nil { + return containerazure.EmptyClusterFleet + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterFleet + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterFleet{ + Project: dcl.StringOrNil(obj["project"].(string)), + } +} + +func flattenContainerAzureClusterFleet(obj *containerazure.ClusterFleet) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "project": obj.Project, + "membership": obj.Membership, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureClusterNetworking(o interface{}) *containerazure.ClusterNetworking { + if o == nil { + return containerazure.EmptyClusterNetworking + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyClusterNetworking + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.ClusterNetworking{ + PodAddressCidrBlocks: expandStringArray(obj["pod_address_cidr_blocks"]), + ServiceAddressCidrBlocks: expandStringArray(obj["service_address_cidr_blocks"]), + VirtualNetworkId: dcl.String(obj["virtual_network_id"].(string)), + } +} + +func flattenContainerAzureClusterNetworking(obj 
*containerazure.ClusterNetworking) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "pod_address_cidr_blocks": obj.PodAddressCidrBlocks, + "service_address_cidr_blocks": obj.ServiceAddressCidrBlocks, + "virtual_network_id": obj.VirtualNetworkId, + } + + return []interface{}{transformed} + +} + +func flattenContainerAzureClusterWorkloadIdentityConfig(obj *containerazure.ClusterWorkloadIdentityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "identity_provider": obj.IdentityProvider, + "issuer_uri": obj.IssuerUri, + "workload_pool": obj.WorkloadPool, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_container_azure_cluster_generated_test.go b/google/resource_container_azure_cluster_generated_test.go new file mode 100644 index 00000000000..3d81fa7bef3 --- /dev/null +++ b/google/resource_container_azure_cluster_generated_test.go @@ -0,0 +1,252 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccContainerAzureCluster_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": getTestProjectFromEnv(), + "project_number": getTestProjectNumberFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerAzureClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureCluster_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project"}, + }, + { + Config: testAccContainerAzureCluster_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"fleet.0.project"}, + }, + }, + }) +} + +func testAccContainerAzureCluster_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +data 
"google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureCluster_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + 
version = "1.21.5-gke.2800" + + database_encryption { + key_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster/providers/Microsoft.KeyVault/vaults/%{byo_prefix}-dev-keyvault/keys/%{byo_prefix}-dev-key" + } + + main_volume { + size_gib = 8 + } + + proxy_config { + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + secret_id = "https://%{byo_prefix}-dev-keyvault.vault.azure.net/secrets/%{byo_prefix}-dev-secret/%{azure_config_secret}" + } + + replica_placements { + azure_availability_zone = "1" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + + annotations = { + annotation-one = "value-one" + } + + description = "An updated sample azure cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + + + +`, context) +} + +func testAccCheckContainerAzureClusterDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_container_azure_cluster" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.Cluster{ + AzureRegion: dcl.String(rs.Primary.Attributes["azure_region"]), + Client: dcl.String(rs.Primary.Attributes["client"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + ResourceGroupId: dcl.String(rs.Primary.Attributes["resource_group_id"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Endpoint: dcl.StringOrNil(rs.Primary.Attributes["endpoint"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containerazure.ClusterStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLContainerAzureClient(config, config.userAgent, billingProject, 0) + _, err := client.GetCluster(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_cluster still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_container_azure_cluster_sweeper_test.go b/google/resource_container_azure_cluster_sweeper_test.go new file mode 100644 index 00000000000..18031c0a9af --- 
/dev/null +++ b/google/resource_container_azure_cluster_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("ContainerAzureCluster", &resource.Sweeper{ + Name: "ContainerAzureCluster", + F: testSweepContainerAzureCluster, + }) +} + +func testSweepContainerAzureCluster(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAzureCluster") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLContainerAzureClient(config, config.userAgent, "", 0) + err = client.DeleteAllCluster(context.Background(), d["project"], d["location"], isDeletableContainerAzureCluster) + if err != nil { + return err + } + return nil +} + +func isDeletableContainerAzureCluster(r *containerazure.Cluster) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_container_azure_node_pool.go b/google/resource_container_azure_node_pool.go new file mode 100644 index 00000000000..a78b9d28280 --- /dev/null +++ b/google/resource_container_azure_node_pool.go @@ -0,0 +1,684 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" +) + +func resourceContainerAzureNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerAzureNodePoolCreate, + Read: resourceContainerAzureNodePoolRead, + Update: resourceContainerAzureNodePoolUpdate, + Delete: resourceContainerAzureNodePoolDelete, + + Importer: &schema.ResourceImporter{ + State: resourceContainerAzureNodePoolImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Autoscaler configuration for this node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolAutoscalingSchema(), + }, + + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The azureCluster for the resource", + }, + + "config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The node configuration of the node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "max_pods_constraint": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool.", + MaxItems: 1, + Elem: ContainerAzureNodePoolMaxPodsConstraintSchema(), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of this resource.", + }, + + "subnet_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The ARM ID of the subnet where the node pool VMs run. Make sure it's a subnet under the virtual network in the cluster configuration.", + }, + + "version": { + Type: schema.TypeString, + Required: true, + Description: "Required. The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool.", + }, + + "annotations": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "azure_availability_zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure availability zone of the nodes in this nodepool. 
When unspecified, it defaults to `1`.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was created.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. If set, there are currently pending changes to the node pool.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. A globally unique identifier for the node pool.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this node pool was last updated.", + }, + }, + } +} + +func ContainerAzureNodePoolAutoscalingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Required. Maximum number of nodes in the node pool. Must be >= min_node_count.", + }, + + "min_node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Required. Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssh_config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. SSH configuration for how to access the node pool machines.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigSshConfigSchema(), + }, + + "root_volume": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk.", + MaxItems: 1, + Elem: ContainerAzureNodePoolConfigRootVolumeSchema(), + }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "vm_size": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. 
When unspecified, it defaults to `Standard_DS2_v2`.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigSshConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page.", + }, + }, + } +} + +func ContainerAzureNodePoolConfigRootVolumeSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_gib": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.", + }, + }, + } +} + +func ContainerAzureNodePoolMaxPodsConstraintSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_pods_per_node": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Required. The maximum number of pods to schedule on a single node.", + }, + }, + } +} + +func resourceContainerAzureNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, createDirective...) 
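+ // NOTE: createDirective is intended to restrict this apply to resource creation only, so a pre-existing node pool with the same identity + // should surface as an error here rather than being silently adopted or modified by the DCL.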
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished creating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAzureNodePoolRead(d, meta) +} + +func resourceContainerAzureNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetNodePool(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("ContainerAzureNodePool %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("autoscaling", flattenContainerAzureNodePoolAutoscaling(res.Autoscaling)); err != nil { + return fmt.Errorf("error setting autoscaling in state: %s", err) + } + if err = d.Set("cluster", res.Cluster); err != nil { + return fmt.Errorf("error setting cluster in state: %s", err) + } + if err = d.Set("config", flattenContainerAzureNodePoolConfig(res.Config)); err != nil { + return fmt.Errorf("error setting config in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("max_pods_constraint", flattenContainerAzureNodePoolMaxPodsConstraint(res.MaxPodsConstraint)); err != nil { + return fmt.Errorf("error setting max_pods_constraint in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("subnet_id", res.SubnetId); err != nil { + return fmt.Errorf("error setting subnet_id in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("annotations", res.Annotations); err != nil { + return fmt.Errorf("error setting annotations in state: %s", err) + } + if err = d.Set("azure_availability_zone", res.AzureAvailabilityZone); err != nil { + return fmt.Errorf("error setting azure_availability_zone in state: %s", err) + } + if err = 
d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceContainerAzureNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyNodePool(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished updating NodePool %q: %#v", d.Id(), res) + + return resourceContainerAzureNodePoolRead(d, meta) +} + +func resourceContainerAzureNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &containerazure.NodePool{ + Autoscaling: expandContainerAzureNodePoolAutoscaling(d.Get("autoscaling")), + Cluster: dcl.String(d.Get("cluster").(string)), + Config: expandContainerAzureNodePoolConfig(d.Get("config")), + Location: dcl.String(d.Get("location").(string)), + MaxPodsConstraint: expandContainerAzureNodePoolMaxPodsConstraint(d.Get("max_pods_constraint")), + Name: dcl.String(d.Get("name").(string)), + SubnetId: dcl.String(d.Get("subnet_id").(string)), + Version: dcl.String(d.Get("version").(string)), + Annotations: checkStringMap(d.Get("annotations")), + AzureAvailabilityZone: dcl.StringOrNil(d.Get("azure_availability_zone").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting NodePool %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLContainerAzureClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteNodePool(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + log.Printf("[DEBUG] Finished deleting NodePool %q", d.Id()) + return nil +} + +func resourceContainerAzureNodePoolImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/azureClusters/(?P<cluster>[^/]+)/azureNodePools/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<cluster>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandContainerAzureNodePoolAutoscaling(o interface{}) *containerazure.NodePoolAutoscaling { + if o == nil { + return containerazure.EmptyNodePoolAutoscaling + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyNodePoolAutoscaling + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolAutoscaling{ + MaxNodeCount: dcl.Int64(int64(obj["max_node_count"].(int))), + MinNodeCount: dcl.Int64(int64(obj["min_node_count"].(int))), + } +} + +func flattenContainerAzureNodePoolAutoscaling(obj *containerazure.NodePoolAutoscaling) interface{} { + if obj == nil || obj.Empty() { + return nil + } +
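+ // Each flattener is the inverse of its expander: a present DCL object is
+ // rendered as a single-element []interface{} so it slots back into the
+ // schema's corresponding single-block nested list.
+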
transformed := map[string]interface{}{ + "max_node_count": obj.MaxNodeCount, + "min_node_count": obj.MinNodeCount, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfig(o interface{}) *containerazure.NodePoolConfig { + if o == nil { + return containerazure.EmptyNodePoolConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyNodePoolConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfig{ + SshConfig: expandContainerAzureNodePoolConfigSshConfig(obj["ssh_config"]), + RootVolume: expandContainerAzureNodePoolConfigRootVolume(obj["root_volume"]), + Tags: checkStringMap(obj["tags"]), + VmSize: dcl.StringOrNil(obj["vm_size"].(string)), + } +} + +func flattenContainerAzureNodePoolConfig(obj *containerazure.NodePoolConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ssh_config": flattenContainerAzureNodePoolConfigSshConfig(obj.SshConfig), + "root_volume": flattenContainerAzureNodePoolConfigRootVolume(obj.RootVolume), + "tags": obj.Tags, + "vm_size": obj.VmSize, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigSshConfig(o interface{}) *containerazure.NodePoolConfigSshConfig { + if o == nil { + return containerazure.EmptyNodePoolConfigSshConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyNodePoolConfigSshConfig + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfigSshConfig{ + AuthorizedKey: dcl.String(obj["authorized_key"].(string)), + } +} + +func flattenContainerAzureNodePoolConfigSshConfig(obj *containerazure.NodePoolConfigSshConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "authorized_key": obj.AuthorizedKey, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolConfigRootVolume(o interface{}) *containerazure.NodePoolConfigRootVolume { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolConfigRootVolume{ + SizeGib: dcl.Int64OrNil(int64(obj["size_gib"].(int))), + } +} + +func flattenContainerAzureNodePoolConfigRootVolume(obj *containerazure.NodePoolConfigRootVolume) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "size_gib": obj.SizeGib, + } + + return []interface{}{transformed} + +} + +func expandContainerAzureNodePoolMaxPodsConstraint(o interface{}) *containerazure.NodePoolMaxPodsConstraint { + if o == nil { + return containerazure.EmptyNodePoolMaxPodsConstraint + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return containerazure.EmptyNodePoolMaxPodsConstraint + } + obj := objArr[0].(map[string]interface{}) + return &containerazure.NodePoolMaxPodsConstraint{ + MaxPodsPerNode: dcl.Int64(int64(obj["max_pods_per_node"].(int))), + } +} + +func flattenContainerAzureNodePoolMaxPodsConstraint(obj *containerazure.NodePoolMaxPodsConstraint) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_pods_per_node": obj.MaxPodsPerNode, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_container_azure_node_pool_generated_test.go b/google/resource_container_azure_node_pool_generated_test.go new file mode 100644 index 00000000000..3f9e47d7c18 --- 
/dev/null +++ b/google/resource_container_azure_node_pool_generated_test.go @@ -0,0 +1,302 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccContainerAzureNodePool_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "azure_app": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_config_secret": "07d4b1f1a7cb4b1b91f070c30ae761a1", + "azure_sub": "00000000-0000-0000-0000-17aad2f0f61f", + "azure_tenant": "00000000-0000-0000-0000-17aad2f0f61f", + "byo_prefix": "mmv2", + "project_name": getTestProjectFromEnv(), + "project_number": getTestProjectNumberFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerAzureNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAzureNodePool_BasicHandWritten(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerAzureNodePool_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_container_azure_node_pool.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerAzureNodePool_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccContainerAzureNodePool_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +data "google_container_azure_versions" "versions" { + project = "%{project_name}" + location = "us-west1" +} + + +resource 
"google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/%{project_number}/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "%{project_number}" + } + + location = "us-west1" + name = "tf-test-name%{random_suffix}" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet" + } + + resource_group_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-cluster" + project = "%{project_name}" +} + +resource "google_container_azure_client" "basic" { + application_id = "%{azure_app}" + location = "us-west1" + name = "tf-test-client-name%{random_suffix}" + tenant_id = "%{azure_tenant}" + project = "%{project_name}" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "tf-test-node-pool-name%{random_suffix}" + subnet_id = "/subscriptions/%{azure_sub}/resourceGroups/%{byo_prefix}-dev-byo/providers/Microsoft.Network/virtualNetworks/%{byo_prefix}-dev-vnet/subnets/default" + version = "1.21.5-gke.2800" + + annotations = { + annotation-two = "value-two" + } + + project = "%{project_name}" +} + + 
+ +`, context) +} + +func testAccCheckContainerAzureNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_container_azure_node_pool" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &containerazure.NodePool{ + Cluster: dcl.String(rs.Primary.Attributes["cluster"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + SubnetId: dcl.String(rs.Primary.Attributes["subnet_id"]), + Version: dcl.String(rs.Primary.Attributes["version"]), + AzureAvailabilityZone: dcl.StringOrNil(rs.Primary.Attributes["azure_availability_zone"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + State: containerazure.NodePoolStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLContainerAzureClient(config, config.userAgent, billingProject, 0) + _, err := client.GetNodePool(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_container_azure_node_pool still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_dataproc_workflow_template.go b/google/resource_dataproc_workflow_template.go new file mode 100644 index 00000000000..3c98fc4aa36 --- /dev/null +++ b/google/resource_dataproc_workflow_template.go @@ -0,0 +1,3450 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file.
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + dataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" +) + +func resourceDataprocWorkflowTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocWorkflowTemplateCreate, + Read: resourceDataprocWorkflowTemplateRead, + Delete: resourceDataprocWorkflowTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocWorkflowTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "jobs": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The Directed Acyclic Graph of Jobs to submit.", + Elem: DataprocWorkflowTemplateJobsSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", + }, + + "placement": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. WorkflowTemplate scheduling information.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementSchema(), + }, + + "dag_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
No more than 32 labels can be associated with a template.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "parameters": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", + Elem: DataprocWorkflowTemplateParametersSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "version": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Output only. The current version of this workflow template.", + Deprecated: "version is not useful as a configurable field, and will be removed in the future.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was created.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was last updated.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + }, + + "hadoop_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Hadoop job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobSchema(), + }, + + "hive_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Hive job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "pig_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Pig job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobSchema(), + }, + + "prerequisite_step_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "presto_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Presto job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobSchema(), + }, + + "pyspark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Job is a PySpark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobSchema(), + }, + + "scheduling": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job scheduling configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSchedulingSchema(), + }, + + "spark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Spark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobSchema(), + }, + + "spark_r_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkR job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobSchema(), + }, + + "spark_sql_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkSql job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains Hive queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. 
Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains the Pig queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Presto client tags to attach to this query", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. 
Setting to `true` can be useful when executing independent parallel queries.", + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema(), + }, + + "output_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values. Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobQueryListSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_python_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "python_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchedulingSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_failures_per_hour": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.", + }, + + "max_failures_total": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file that contains the main class.", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_r_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_selector": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementClusterSelectorSchema(), + }, + + "managed_cluster": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A cluster that is managed by the workflow.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementClusterSelectorSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_labels": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + Description: "Required. The cluster labels. Cluster must have all labels to match.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + }, + + "config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The cluster configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateParametersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. 
Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Brief description of the parameter. Must not exceed 1024 characters.", + }, + + "validation": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Validation rules to be applied to this parameter's value.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on regular expressions.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationRegexSchema(), + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on a list of allowed values.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationValuesSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationRegexSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regexes": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationValuesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
List of allowed values for the parameter.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateClusterInstanceGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsSchema(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfigSchema(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. 
The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigAutoscalingConfigSchema(), + }, + + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Encryption settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigEncryptionConfigSchema(), + }, + + "endpoint_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Port/endpoint configuration for this cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigEndpointConfigSchema(), + }, + + "gce_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigSchema(), + }, + + "initialization_actions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", + Elem: DataprocWorkflowTemplateClusterClusterConfigInitializationActionsSchema(), + }, + + "lifecycle_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Lifecycle setting for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigLifecycleConfigSchema(), + }, + + "master_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigSchema(), + }, + + "secondary_worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for additional worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigSchema(), + }, + + "security_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Security settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigSecurityConfigSchema(), + }, + + "software_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
The config settings for software inside the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigSoftwareConfigSchema(), + }, + + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "temp_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigAutoscalingConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigEncryptionConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gce_pd_kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigEndpointConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_http_port_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. 
Defaults to false.", + }, + + "http_ports": { + Type: schema.TypeMap, + Computed: true, + Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "internal_ip_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", + }, + + "node_group_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Node Group Affinity for sole-tenant clusters.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinitySchema(), + }, + + "private_ipv6_google_access": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", + }, + + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Reservation Affinity for consuming Zonal reservation.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinitySchema(), + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. 
If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + }, + + "service_account_scopes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinitySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinitySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
Type of reservation to consume. Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Corresponds to the label key of the reservation resource.", + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Corresponds to the label values of the reservation resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigInitializationActionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "executable_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Required. Cloud Storage URI of the executable file.", + }, + + "execution_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Amount of time the executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at the end of the timeout period.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigLifecycleConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete_time": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The time when the cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "auto_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime duration of the cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_start_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when the cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigSecurityConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kerberos_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Kerberos related configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfigSchema(), + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cross_realm_trust_admin_server": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_kdc": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + }, + + "cross_realm_trust_shared_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + }, + + "enable_kerberos": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + }, + + "kdc_db_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + }, + + "key_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "keystore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + }, + + "keystore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", + }, + + "realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + }, + + "root_principal_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + }, + + "tgt_lifetime_hours": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime of the ticket granting ticket, in hours. 
If not specified, or if the user specifies 0, then the default value of 10 will be used.", + }, + + "truststore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + }, + + "truststore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + }, + }, + } +} + +func DataprocWorkflowTemplateClusterClusterConfigSoftwareConfigSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + }, + + "optional_components": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The set of components to activate on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. 
The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceDataprocWorkflowTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataproc.WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: checkStringMap(d.Get("labels")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyWorkflowTemplate(context.Background(), obj, createDirective...) 
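+
+	// NOTE: ApplyWorkflowTemplate is the DCL's single declarative entry
+	// point; creates and updates both flow through Apply. The variadic
+	// dcl.ApplyOption values carried in createDirective are lifecycle
+	// guards intended to restrict this particular call to resource
+	// creation, so Apply cannot silently update or replace an existing
+	// template at this step. Apply may also return dcl.DiffAfterApplyError,
+	// which signals that the request succeeded but the post-apply state
+	// still differed from the desired state; the check below deliberately
+	// logs that diff instead of treating it as a failed create.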
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating WorkflowTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished creating WorkflowTemplate %q: %#v", d.Id(), res) + + return resourceDataprocWorkflowTemplateRead(d, meta) +} + +func resourceDataprocWorkflowTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataproc.WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: checkStringMap(d.Get("labels")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetWorkflowTemplate(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("DataprocWorkflowTemplate %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("jobs", flattenDataprocWorkflowTemplateJobsArray(res.Jobs)); err != nil { + return fmt.Errorf("error setting jobs in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("placement", flattenDataprocWorkflowTemplatePlacement(res.Placement)); err != nil { + return fmt.Errorf("error setting placement in state: %s", err) + } + if err = d.Set("dag_timeout", res.DagTimeout); err != nil { + return fmt.Errorf("error setting dag_timeout in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("parameters", flattenDataprocWorkflowTemplateParametersArray(res.Parameters)); err != nil { + return fmt.Errorf("error setting parameters in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("version", res.Version); err != nil { + return fmt.Errorf("error setting version in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} + +func resourceDataprocWorkflowTemplateDelete(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &dataproc.WorkflowTemplate{ + Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), + DagTimeout: dcl.String(d.Get("dag_timeout").(string)), + Labels: checkStringMap(d.Get("labels")), + Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), + Project: dcl.String(project), + Version: dcl.Int64OrNil(int64(d.Get("version").(int))), + } + + log.Printf("[DEBUG] Deleting WorkflowTemplate %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteWorkflowTemplate(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting WorkflowTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished deleting WorkflowTemplate %q", d.Id()) + return nil +} + +func resourceDataprocWorkflowTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/workflowTemplates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandDataprocWorkflowTemplateJobsArray(o interface{}) []dataproc.WorkflowTemplateJobs { + if o == nil { + return make([]dataproc.WorkflowTemplateJobs, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]dataproc.WorkflowTemplateJobs, 0) + } + + items := make([]dataproc.WorkflowTemplateJobs, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateJobs(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateJobs(o interface{}) *dataproc.WorkflowTemplateJobs { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobs + } + + obj := o.(map[string]interface{}) + return &dataproc.WorkflowTemplateJobs{ + StepId: dcl.String(obj["step_id"].(string)), + HadoopJob: expandDataprocWorkflowTemplateJobsHadoopJob(obj["hadoop_job"]), + HiveJob: expandDataprocWorkflowTemplateJobsHiveJob(obj["hive_job"]), + Labels: checkStringMap(obj["labels"]), + PigJob: expandDataprocWorkflowTemplateJobsPigJob(obj["pig_job"]), + PrerequisiteStepIds: expandStringArray(obj["prerequisite_step_ids"]), + PrestoJob: expandDataprocWorkflowTemplateJobsPrestoJob(obj["presto_job"]), + PysparkJob: expandDataprocWorkflowTemplateJobsPysparkJob(obj["pyspark_job"]), + Scheduling: 
expandDataprocWorkflowTemplateJobsScheduling(obj["scheduling"]), + SparkJob: expandDataprocWorkflowTemplateJobsSparkJob(obj["spark_job"]), + SparkRJob: expandDataprocWorkflowTemplateJobsSparkRJob(obj["spark_r_job"]), + SparkSqlJob: expandDataprocWorkflowTemplateJobsSparkSqlJob(obj["spark_sql_job"]), + } +} + +func flattenDataprocWorkflowTemplateJobsArray(objs []dataproc.WorkflowTemplateJobs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateJobs(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateJobs(obj *dataproc.WorkflowTemplateJobs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "step_id": obj.StepId, + "hadoop_job": flattenDataprocWorkflowTemplateJobsHadoopJob(obj.HadoopJob), + "hive_job": flattenDataprocWorkflowTemplateJobsHiveJob(obj.HiveJob), + "labels": obj.Labels, + "pig_job": flattenDataprocWorkflowTemplateJobsPigJob(obj.PigJob), + "prerequisite_step_ids": obj.PrerequisiteStepIds, + "presto_job": flattenDataprocWorkflowTemplateJobsPrestoJob(obj.PrestoJob), + "pyspark_job": flattenDataprocWorkflowTemplateJobsPysparkJob(obj.PysparkJob), + "scheduling": flattenDataprocWorkflowTemplateJobsScheduling(obj.Scheduling), + "spark_job": flattenDataprocWorkflowTemplateJobsSparkJob(obj.SparkJob), + "spark_r_job": flattenDataprocWorkflowTemplateJobsSparkRJob(obj.SparkRJob), + "spark_sql_job": flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj.SparkSqlJob), + } + + return transformed + +} + +func expandDataprocWorkflowTemplateJobsHadoopJob(o interface{}) *dataproc.WorkflowTemplateJobsHadoopJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsHadoopJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsHadoopJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsHadoopJob{ + ArchiveUris: expandStringArray(obj["archive_uris"]), + Args: expandStringArray(obj["args"]), + FileUris: expandStringArray(obj["file_uris"]), + JarFileUris: expandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj["logging_config"]), + MainClass: dcl.String(obj["main_class"].(string)), + MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), + Properties: checkStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHadoopJob(obj *dataproc.WorkflowTemplateJobsHadoopJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj.LoggingConfig), + "main_class": obj.MainClass, + "main_jar_file_uri": obj.MainJarFileUri, + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsHadoopJobLoggingConfig { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsHadoopJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsHadoopJobLoggingConfig{ + DriverLogLevels: 
checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj *dataproc.WorkflowTemplateJobsHadoopJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHiveJob(o interface{}) *dataproc.WorkflowTemplateJobsHiveJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsHiveJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsHiveJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsHiveJob{ + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + JarFileUris: expandStringArray(obj["jar_file_uris"]), + Properties: checkStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsHiveJobQueryList(obj["query_list"]), + ScriptVariables: checkStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHiveJob(obj *dataproc.WorkflowTemplateJobsHiveJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "continue_on_failure": obj.ContinueOnFailure, + "jar_file_uris": obj.JarFileUris, + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsHiveJobQueryList(o interface{}) *dataproc.WorkflowTemplateJobsHiveJobQueryList { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsHiveJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsHiveJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsHiveJobQueryList{ + Queries: expandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj *dataproc.WorkflowTemplateJobsHiveJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJob(o interface{}) *dataproc.WorkflowTemplateJobsPigJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPigJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPigJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPigJob{ + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + JarFileUris: expandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj["logging_config"]), + Properties: checkStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsPigJobQueryList(obj["query_list"]), + ScriptVariables: checkStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJob(obj *dataproc.WorkflowTemplateJobsPigJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "continue_on_failure": obj.ContinueOnFailure, + "jar_file_uris": obj.JarFileUris, + 
"logging_config": flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsPigJobLoggingConfig { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPigJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPigJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPigJobLoggingConfig{ + DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj *dataproc.WorkflowTemplateJobsPigJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPigJobQueryList(o interface{}) *dataproc.WorkflowTemplateJobsPigJobQueryList { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPigJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPigJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPigJobQueryList{ + Queries: expandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj *dataproc.WorkflowTemplateJobsPigJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJob(o interface{}) *dataproc.WorkflowTemplateJobsPrestoJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPrestoJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPrestoJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPrestoJob{ + ClientTags: expandStringArray(obj["client_tags"]), + ContinueOnFailure: dcl.Bool(obj["continue_on_failure"].(bool)), + LoggingConfig: expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj["logging_config"]), + OutputFormat: dcl.String(obj["output_format"].(string)), + Properties: checkStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsPrestoJobQueryList(obj["query_list"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJob(obj *dataproc.WorkflowTemplateJobsPrestoJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "client_tags": obj.ClientTags, + "continue_on_failure": obj.ContinueOnFailure, + "logging_config": flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj.LoggingConfig), + "output_format": obj.OutputFormat, + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj.QueryList), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsPrestoJobLoggingConfig { + if o == nil { + 
return dataproc.EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPrestoJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPrestoJobLoggingConfig{ + DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj *dataproc.WorkflowTemplateJobsPrestoJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPrestoJobQueryList(o interface{}) *dataproc.WorkflowTemplateJobsPrestoJobQueryList { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPrestoJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPrestoJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPrestoJobQueryList{ + Queries: expandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj *dataproc.WorkflowTemplateJobsPrestoJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPysparkJob(o interface{}) *dataproc.WorkflowTemplateJobsPysparkJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPysparkJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPysparkJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPysparkJob{ + MainPythonFileUri: dcl.String(obj["main_python_file_uri"].(string)), + ArchiveUris: expandStringArray(obj["archive_uris"]), + Args: expandStringArray(obj["args"]), + FileUris: expandStringArray(obj["file_uris"]), + JarFileUris: expandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj["logging_config"]), + Properties: checkStringMap(obj["properties"]), + PythonFileUris: expandStringArray(obj["python_file_uris"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPysparkJob(obj *dataproc.WorkflowTemplateJobsPysparkJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "main_python_file_uri": obj.MainPythonFileUri, + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "python_file_uris": obj.PythonFileUris, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsPysparkJobLoggingConfig { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsPysparkJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsPysparkJobLoggingConfig{ + DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj 
*dataproc.WorkflowTemplateJobsPysparkJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsScheduling(o interface{}) *dataproc.WorkflowTemplateJobsScheduling { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsScheduling + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsScheduling + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsScheduling{ + MaxFailuresPerHour: dcl.Int64(int64(obj["max_failures_per_hour"].(int))), + MaxFailuresTotal: dcl.Int64(int64(obj["max_failures_total"].(int))), + } +} + +func flattenDataprocWorkflowTemplateJobsScheduling(obj *dataproc.WorkflowTemplateJobsScheduling) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "max_failures_per_hour": obj.MaxFailuresPerHour, + "max_failures_total": obj.MaxFailuresTotal, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkJob(o interface{}) *dataproc.WorkflowTemplateJobsSparkJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsSparkJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkJob{ + ArchiveUris: expandStringArray(obj["archive_uris"]), + Args: expandStringArray(obj["args"]), + FileUris: expandStringArray(obj["file_uris"]), + JarFileUris: expandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj["logging_config"]), + MainClass: dcl.String(obj["main_class"].(string)), + MainJarFileUri: dcl.String(obj["main_jar_file_uri"].(string)), + Properties: checkStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkJob(obj *dataproc.WorkflowTemplateJobsSparkJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj.LoggingConfig), + "main_class": obj.MainClass, + "main_jar_file_uri": obj.MainJarFileUri, + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsSparkJobLoggingConfig { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkJobLoggingConfig{ + DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj *dataproc.WorkflowTemplateJobsSparkJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkRJob(o interface{}) *dataproc.WorkflowTemplateJobsSparkRJob { + if o == nil { + return 
dataproc.EmptyWorkflowTemplateJobsSparkRJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkRJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkRJob{ + MainRFileUri: dcl.String(obj["main_r_file_uri"].(string)), + ArchiveUris: expandStringArray(obj["archive_uris"]), + Args: expandStringArray(obj["args"]), + FileUris: expandStringArray(obj["file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj["logging_config"]), + Properties: checkStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkRJob(obj *dataproc.WorkflowTemplateJobsSparkRJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "main_r_file_uri": obj.MainRFileUri, + "archive_uris": obj.ArchiveUris, + "args": obj.Args, + "file_uris": obj.FileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsSparkRJobLoggingConfig { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkRJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkRJobLoggingConfig{ + DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj *dataproc.WorkflowTemplateJobsSparkRJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJob(o interface{}) *dataproc.WorkflowTemplateJobsSparkSqlJob { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsSparkSqlJob + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkSqlJob + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkSqlJob{ + JarFileUris: expandStringArray(obj["jar_file_uris"]), + LoggingConfig: expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj["logging_config"]), + Properties: checkStringMap(obj["properties"]), + QueryFileUri: dcl.String(obj["query_file_uri"].(string)), + QueryList: expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj["query_list"]), + ScriptVariables: checkStringMap(obj["script_variables"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj *dataproc.WorkflowTemplateJobsSparkSqlJob) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "jar_file_uris": obj.JarFileUris, + "logging_config": flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj.LoggingConfig), + "properties": obj.Properties, + "query_file_uri": obj.QueryFileUri, + "query_list": flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj.QueryList), + "script_variables": obj.ScriptVariables, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(o interface{}) *dataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig { + if o == nil { + return 
dataproc.EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig{ + DriverLogLevels: checkStringMap(obj["driver_log_levels"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj *dataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "driver_log_levels": obj.DriverLogLevels, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(o interface{}) *dataproc.WorkflowTemplateJobsSparkSqlJobQueryList { + if o == nil { + return dataproc.EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateJobsSparkSqlJobQueryList + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateJobsSparkSqlJobQueryList{ + Queries: expandStringArray(obj["queries"]), + } +} + +func flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj *dataproc.WorkflowTemplateJobsSparkSqlJobQueryList) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "queries": obj.Queries, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacement(o interface{}) *dataproc.WorkflowTemplatePlacement { + if o == nil { + return dataproc.EmptyWorkflowTemplatePlacement + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplatePlacement + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplatePlacement{ + ClusterSelector: expandDataprocWorkflowTemplatePlacementClusterSelector(obj["cluster_selector"]), + ManagedCluster: expandDataprocWorkflowTemplatePlacementManagedCluster(obj["managed_cluster"]), + } +} + +func flattenDataprocWorkflowTemplatePlacement(obj *dataproc.WorkflowTemplatePlacement) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_selector": flattenDataprocWorkflowTemplatePlacementClusterSelector(obj.ClusterSelector), + "managed_cluster": flattenDataprocWorkflowTemplatePlacementManagedCluster(obj.ManagedCluster), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementClusterSelector(o interface{}) *dataproc.WorkflowTemplatePlacementClusterSelector { + if o == nil { + return dataproc.EmptyWorkflowTemplatePlacementClusterSelector + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplatePlacementClusterSelector + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplatePlacementClusterSelector{ + ClusterLabels: checkStringMap(obj["cluster_labels"]), + Zone: dcl.StringOrNil(obj["zone"].(string)), + } +} + +func flattenDataprocWorkflowTemplatePlacementClusterSelector(obj *dataproc.WorkflowTemplatePlacementClusterSelector) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_labels": obj.ClusterLabels, + "zone": obj.Zone, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplatePlacementManagedCluster(o interface{}) *dataproc.WorkflowTemplatePlacementManagedCluster { + if o == nil { + return 
dataproc.EmptyWorkflowTemplatePlacementManagedCluster + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplatePlacementManagedCluster + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplatePlacementManagedCluster{ + ClusterName: dcl.String(obj["cluster_name"].(string)), + Config: expandDataprocWorkflowTemplateClusterClusterConfig(obj["config"]), + Labels: checkStringMap(obj["labels"]), + } +} + +func flattenDataprocWorkflowTemplatePlacementManagedCluster(obj *dataproc.WorkflowTemplatePlacementManagedCluster) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cluster_name": obj.ClusterName, + "config": flattenDataprocWorkflowTemplateClusterClusterConfig(obj.Config), + "labels": obj.Labels, + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplateParametersArray(o interface{}) []dataproc.WorkflowTemplateParameters { + if o == nil { + return make([]dataproc.WorkflowTemplateParameters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]dataproc.WorkflowTemplateParameters, 0) + } + + items := make([]dataproc.WorkflowTemplateParameters, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateParameters(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateParameters(o interface{}) *dataproc.WorkflowTemplateParameters { + if o == nil { + return dataproc.EmptyWorkflowTemplateParameters + } + + obj := o.(map[string]interface{}) + return &dataproc.WorkflowTemplateParameters{ + Fields: expandStringArray(obj["fields"]), + Name: dcl.String(obj["name"].(string)), + Description: dcl.String(obj["description"].(string)), + Validation: expandDataprocWorkflowTemplateParametersValidation(obj["validation"]), + } +} + +func flattenDataprocWorkflowTemplateParametersArray(objs []dataproc.WorkflowTemplateParameters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateParameters(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateParameters(obj *dataproc.WorkflowTemplateParameters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "fields": obj.Fields, + "name": obj.Name, + "description": obj.Description, + "validation": flattenDataprocWorkflowTemplateParametersValidation(obj.Validation), + } + + return transformed + +} + +func expandDataprocWorkflowTemplateParametersValidation(o interface{}) *dataproc.WorkflowTemplateParametersValidation { + if o == nil { + return dataproc.EmptyWorkflowTemplateParametersValidation + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateParametersValidation + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateParametersValidation{ + Regex: expandDataprocWorkflowTemplateParametersValidationRegex(obj["regex"]), + Values: expandDataprocWorkflowTemplateParametersValidationValues(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidation(obj *dataproc.WorkflowTemplateParametersValidation) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "regex": flattenDataprocWorkflowTemplateParametersValidationRegex(obj.Regex), + "values": flattenDataprocWorkflowTemplateParametersValidationValues(obj.Values), + } + + 
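+ // The flattened map is wrapped in a single-element list below, matching how Terraform stores a nested single-item block in state.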
return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateParametersValidationRegex(o interface{}) *dataproc.WorkflowTemplateParametersValidationRegex { + if o == nil { + return dataproc.EmptyWorkflowTemplateParametersValidationRegex + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateParametersValidationRegex + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateParametersValidationRegex{ + Regexes: expandStringArray(obj["regexes"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidationRegex(obj *dataproc.WorkflowTemplateParametersValidationRegex) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "regexes": obj.Regexes, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateParametersValidationValues(o interface{}) *dataproc.WorkflowTemplateParametersValidationValues { + if o == nil { + return dataproc.EmptyWorkflowTemplateParametersValidationValues + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyWorkflowTemplateParametersValidationValues + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.WorkflowTemplateParametersValidationValues{ + Values: expandStringArray(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateParametersValidationValues(obj *dataproc.WorkflowTemplateParametersValidationValues) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "values": obj.Values, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterInstanceGroupConfig(o interface{}) *dataproc.ClusterInstanceGroupConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterInstanceGroupConfig{ + Accelerators: expandDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(obj["accelerators"]), + DiskConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(obj["disk_config"]), + Image: dcl.String(obj["image"].(string)), + MachineType: dcl.String(obj["machine_type"].(string)), + MinCpuPlatform: dcl.StringOrNil(obj["min_cpu_platform"].(string)), + NumInstances: dcl.Int64(int64(obj["num_instances"].(int))), + Preemptibility: dataproc.ClusterInstanceGroupConfigPreemptibilityEnumRef(obj["preemptibility"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj *dataproc.ClusterInstanceGroupConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerators": flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(obj.Accelerators), + "disk_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(obj.DiskConfig), + "image": obj.Image, + "machine_type": obj.MachineType, + "min_cpu_platform": obj.MinCpuPlatform, + "num_instances": obj.NumInstances, + "preemptibility": obj.Preemptibility, + "instance_names": obj.InstanceNames, + "is_preemptible": obj.IsPreemptible, + "managed_group_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfig(obj.ManagedGroupConfig), + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(o interface{}) []dataproc.ClusterInstanceGroupConfigAccelerators { + if o == nil { + return nil + } + 
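+ // Each element of the Terraform list arrives as a map[string]interface{}; expand them one at a time via the single-item expander below.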
+ objs := o.([]interface{}) + if len(objs) == 0 { + return nil + } + + items := make([]dataproc.ClusterInstanceGroupConfigAccelerators, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(o interface{}) *dataproc.ClusterInstanceGroupConfigAccelerators { + if o == nil { + return nil + } + + obj := o.(map[string]interface{}) + return &dataproc.ClusterInstanceGroupConfigAccelerators{ + AcceleratorCount: dcl.Int64(int64(obj["accelerator_count"].(int))), + AcceleratorType: dcl.String(obj["accelerator_type"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(objs []dataproc.ClusterInstanceGroupConfigAccelerators) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(obj *dataproc.ClusterInstanceGroupConfigAccelerators) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "accelerator_count": obj.AcceleratorCount, + "accelerator_type": obj.AcceleratorType, + } + + return transformed + +} + +func expandDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(o interface{}) *dataproc.ClusterInstanceGroupConfigDiskConfig { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterInstanceGroupConfigDiskConfig{ + BootDiskSizeGb: dcl.Int64(int64(obj["boot_disk_size_gb"].(int))), + BootDiskType: dcl.String(obj["boot_disk_type"].(string)), + NumLocalSsds: dcl.Int64OrNil(int64(obj["num_local_ssds"].(int))), + } +} + +func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(obj *dataproc.ClusterInstanceGroupConfigDiskConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "boot_disk_size_gb": obj.BootDiskSizeGb, + "boot_disk_type": obj.BootDiskType, + "num_local_ssds": obj.NumLocalSsds, + } + + return []interface{}{transformed} + +} + +func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfig(obj *dataproc.ClusterInstanceGroupConfigManagedGroupConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instance_group_manager_name": obj.InstanceGroupManagerName, + "instance_template_name": obj.InstanceTemplateName, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfig(o interface{}) *dataproc.ClusterClusterConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfig{ + AutoscalingConfig: expandDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(obj["autoscaling_config"]), + EncryptionConfig: expandDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(obj["encryption_config"]), + EndpointConfig: expandDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(obj["endpoint_config"]), + GceClusterConfig: 
expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(obj["gce_cluster_config"]), + InitializationActions: expandDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(obj["initialization_actions"]), + LifecycleConfig: expandDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(obj["lifecycle_config"]), + MasterConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfig(obj["master_config"]), + SecondaryWorkerConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfig(obj["secondary_worker_config"]), + SecurityConfig: expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(obj["security_config"]), + SoftwareConfig: expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(obj["software_config"]), + StagingBucket: dcl.String(obj["staging_bucket"].(string)), + TempBucket: dcl.String(obj["temp_bucket"].(string)), + WorkerConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfig(obj["worker_config"]), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfig(obj *dataproc.ClusterClusterConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "autoscaling_config": flattenDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(obj.AutoscalingConfig), + "encryption_config": flattenDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(obj.EncryptionConfig), + "endpoint_config": flattenDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(obj.EndpointConfig), + "gce_cluster_config": flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(obj.GceClusterConfig), + "initialization_actions": flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(obj.InitializationActions), + "lifecycle_config": flattenDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(obj.LifecycleConfig), + "master_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj.MasterConfig), + "secondary_worker_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj.SecondaryWorkerConfig), + "security_config": flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(obj.SecurityConfig), + "software_config": flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(obj.SoftwareConfig), + "staging_bucket": obj.StagingBucket, + "temp_bucket": obj.TempBucket, + "worker_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj.WorkerConfig), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(o interface{}) *dataproc.ClusterClusterConfigAutoscalingConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigAutoscalingConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigAutoscalingConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigAutoscalingConfig{ + Policy: dcl.String(obj["policy"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(obj *dataproc.ClusterClusterConfigAutoscalingConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "policy": obj.Policy, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(o interface{}) *dataproc.ClusterClusterConfigEncryptionConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigEncryptionConfig + } + 
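+ // An absent or zero-length block yields the DCL's typed Empty sentinel rather than a nil pointer; flatteners skip it via obj.Empty().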
objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigEncryptionConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigEncryptionConfig{ + GcePdKmsKeyName: dcl.String(obj["gce_pd_kms_key_name"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(obj *dataproc.ClusterClusterConfigEncryptionConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "gce_pd_kms_key_name": obj.GcePdKmsKeyName, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(o interface{}) *dataproc.ClusterClusterConfigEndpointConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigEndpointConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigEndpointConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigEndpointConfig{ + EnableHttpPortAccess: dcl.Bool(obj["enable_http_port_access"].(bool)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(obj *dataproc.ClusterClusterConfigEndpointConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "enable_http_port_access": obj.EnableHttpPortAccess, + "http_ports": obj.HttpPorts, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(o interface{}) *dataproc.ClusterClusterConfigGceClusterConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigGceClusterConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigGceClusterConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigGceClusterConfig{ + InternalIPOnly: dcl.Bool(obj["internal_ip_only"].(bool)), + Metadata: checkStringMap(obj["metadata"]), + Network: dcl.String(obj["network"].(string)), + NodeGroupAffinity: expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(obj["node_group_affinity"]), + PrivateIPv6GoogleAccess: dataproc.ClusterClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(obj["private_ipv6_google_access"].(string)), + ReservationAffinity: expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(obj["reservation_affinity"]), + ServiceAccount: dcl.String(obj["service_account"].(string)), + ServiceAccountScopes: expandStringArray(obj["service_account_scopes"]), + Subnetwork: dcl.String(obj["subnetwork"].(string)), + Tags: expandStringArray(obj["tags"]), + Zone: dcl.StringOrNil(obj["zone"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(obj *dataproc.ClusterClusterConfigGceClusterConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "internal_ip_only": obj.InternalIPOnly, + "metadata": obj.Metadata, + "network": obj.Network, + "node_group_affinity": flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(obj.NodeGroupAffinity), + "private_ipv6_google_access": obj.PrivateIPv6GoogleAccess, + "reservation_affinity": flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(obj.ReservationAffinity), + "service_account": obj.ServiceAccount, + "service_account_scopes": obj.ServiceAccountScopes, + 
"subnetwork": obj.Subnetwork, + "tags": obj.Tags, + "zone": obj.Zone, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(o interface{}) *dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity { + if o == nil { + return dataproc.EmptyClusterClusterConfigGceClusterConfigNodeGroupAffinity + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigGceClusterConfigNodeGroupAffinity + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity{ + NodeGroup: dcl.String(obj["node_group"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(obj *dataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "node_group": obj.NodeGroup, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(o interface{}) *dataproc.ClusterClusterConfigGceClusterConfigReservationAffinity { + if o == nil { + return dataproc.EmptyClusterClusterConfigGceClusterConfigReservationAffinity + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigGceClusterConfigReservationAffinity + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigGceClusterConfigReservationAffinity{ + ConsumeReservationType: dataproc.ClusterClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(obj["consume_reservation_type"].(string)), + Key: dcl.String(obj["key"].(string)), + Values: expandStringArray(obj["values"]), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(obj *dataproc.ClusterClusterConfigGceClusterConfigReservationAffinity) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "consume_reservation_type": obj.ConsumeReservationType, + "key": obj.Key, + "values": obj.Values, + } + + return []interface{}{transformed} + +} +func expandDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(o interface{}) []dataproc.ClusterClusterConfigInitializationActions { + if o == nil { + return make([]dataproc.ClusterClusterConfigInitializationActions, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]dataproc.ClusterClusterConfigInitializationActions, 0) + } + + items := make([]dataproc.ClusterClusterConfigInitializationActions, 0, len(objs)) + for _, item := range objs { + i := expandDataprocWorkflowTemplateClusterClusterConfigInitializationActions(item) + items = append(items, *i) + } + + return items +} + +func expandDataprocWorkflowTemplateClusterClusterConfigInitializationActions(o interface{}) *dataproc.ClusterClusterConfigInitializationActions { + if o == nil { + return dataproc.EmptyClusterClusterConfigInitializationActions + } + + obj := o.(map[string]interface{}) + return &dataproc.ClusterClusterConfigInitializationActions{ + ExecutableFile: dcl.String(obj["executable_file"].(string)), + ExecutionTimeout: dcl.String(obj["execution_timeout"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(objs []dataproc.ClusterClusterConfigInitializationActions) []interface{} { + if objs == nil { + return nil + } + + items := 
[]interface{}{} + for _, item := range objs { + i := flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActions(&item) + items = append(items, i) + } + + return items +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActions(obj *dataproc.ClusterClusterConfigInitializationActions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "executable_file": obj.ExecutableFile, + "execution_timeout": obj.ExecutionTimeout, + } + + return transformed + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(o interface{}) *dataproc.ClusterClusterConfigLifecycleConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigLifecycleConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigLifecycleConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigLifecycleConfig{ + AutoDeleteTime: dcl.String(obj["auto_delete_time"].(string)), + AutoDeleteTtl: dcl.String(obj["auto_delete_ttl"].(string)), + IdleDeleteTtl: dcl.String(obj["idle_delete_ttl"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(obj *dataproc.ClusterClusterConfigLifecycleConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "auto_delete_time": obj.AutoDeleteTime, + "auto_delete_ttl": obj.AutoDeleteTtl, + "idle_delete_ttl": obj.IdleDeleteTtl, + "idle_start_time": obj.IdleStartTime, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(o interface{}) *dataproc.ClusterClusterConfigSecurityConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigSecurityConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigSecurityConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigSecurityConfig{ + KerberosConfig: expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(obj["kerberos_config"]), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(obj *dataproc.ClusterClusterConfigSecurityConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "kerberos_config": flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(obj.KerberosConfig), + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(o interface{}) *dataproc.ClusterClusterConfigSecurityConfigKerberosConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigSecurityConfigKerberosConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigSecurityConfigKerberosConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigSecurityConfigKerberosConfig{ + CrossRealmTrustAdminServer: dcl.String(obj["cross_realm_trust_admin_server"].(string)), + CrossRealmTrustKdc: dcl.String(obj["cross_realm_trust_kdc"].(string)), + CrossRealmTrustRealm: dcl.String(obj["cross_realm_trust_realm"].(string)), + CrossRealmTrustSharedPassword: dcl.String(obj["cross_realm_trust_shared_password"].(string)), + EnableKerberos: dcl.Bool(obj["enable_kerberos"].(bool)), + KdcDbKey: dcl.String(obj["kdc_db_key"].(string)), + KeyPassword: 
dcl.String(obj["key_password"].(string)), + Keystore: dcl.String(obj["keystore"].(string)), + KeystorePassword: dcl.String(obj["keystore_password"].(string)), + KmsKey: dcl.String(obj["kms_key"].(string)), + Realm: dcl.String(obj["realm"].(string)), + RootPrincipalPassword: dcl.String(obj["root_principal_password"].(string)), + TgtLifetimeHours: dcl.Int64(int64(obj["tgt_lifetime_hours"].(int))), + Truststore: dcl.String(obj["truststore"].(string)), + TruststorePassword: dcl.String(obj["truststore_password"].(string)), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(obj *dataproc.ClusterClusterConfigSecurityConfigKerberosConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cross_realm_trust_admin_server": obj.CrossRealmTrustAdminServer, + "cross_realm_trust_kdc": obj.CrossRealmTrustKdc, + "cross_realm_trust_realm": obj.CrossRealmTrustRealm, + "cross_realm_trust_shared_password": obj.CrossRealmTrustSharedPassword, + "enable_kerberos": obj.EnableKerberos, + "kdc_db_key": obj.KdcDbKey, + "key_password": obj.KeyPassword, + "keystore": obj.Keystore, + "keystore_password": obj.KeystorePassword, + "kms_key": obj.KmsKey, + "realm": obj.Realm, + "root_principal_password": obj.RootPrincipalPassword, + "tgt_lifetime_hours": obj.TgtLifetimeHours, + "truststore": obj.Truststore, + "truststore_password": obj.TruststorePassword, + } + + return []interface{}{transformed} + +} + +func expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(o interface{}) *dataproc.ClusterClusterConfigSoftwareConfig { + if o == nil { + return dataproc.EmptyClusterClusterConfigSoftwareConfig + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return dataproc.EmptyClusterClusterConfigSoftwareConfig + } + obj := objArr[0].(map[string]interface{}) + return &dataproc.ClusterClusterConfigSoftwareConfig{ + ImageVersion: dcl.String(obj["image_version"].(string)), + OptionalComponents: expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(obj["optional_components"]), + Properties: checkStringMap(obj["properties"]), + } +} + +func flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(obj *dataproc.ClusterClusterConfigSoftwareConfig) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "image_version": obj.ImageVersion, + "optional_components": flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(obj.OptionalComponents), + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} +func flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(obj []dataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} + +func expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(o interface{}) []dataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnum { + objs := o.([]interface{}) + items := make([]dataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(objs)) + for _, item := range objs { + i := dataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnumRef(item.(string)) + items = append(items, *i) + } + return items +} diff --git a/google/resource_dataproc_workflow_template_sweeper_test.go 
b/google/resource_dataproc_workflow_template_sweeper_test.go new file mode 100644 index 00000000000..ac39eeee580 --- /dev/null +++ b/google/resource_dataproc_workflow_template_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + dataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("DataprocWorkflowTemplate", &resource.Sweeper{ + Name: "DataprocWorkflowTemplate", + F: testSweepDataprocWorkflowTemplate, + }) +} + +func testSweepDataprocWorkflowTemplate(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for DataprocWorkflowTemplate") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLDataprocClient(config, config.userAgent, "", 0) + err = client.DeleteAllWorkflowTemplate(context.Background(), d["project"], d["location"], isDeletableDataprocWorkflowTemplate) + if err != nil { + return err + } + return nil +} + +func isDeletableDataprocWorkflowTemplate(r *dataproc.WorkflowTemplate) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_eventarc_trigger.go b/google/resource_eventarc_trigger.go new file mode 100644 index 00000000000..0caeb0518c8 --- /dev/null +++ b/google/resource_eventarc_trigger.go @@ -0,0 +1,651 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" +) + +func resourceEventarcTrigger() *schema.Resource { + return &schema.Resource{ + Create: resourceEventarcTriggerCreate, + Read: resourceEventarcTriggerRead, + Update: resourceEventarcTriggerUpdate, + Delete: resourceEventarcTriggerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceEventarcTriggerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "destination": { + Type: schema.TypeList, + Required: true, + Description: "Required. Destination specifies where the events should be sent.", + MaxItems: 1, + Elem: EventarcTriggerDestinationSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "matching_criteria": { + Type: schema.TypeSet, + Required: true, + Description: "Required. The list of filters that apply to event attributes. Only events that match all the provided filters will be sent to the destination.", + Elem: EventarcTriggerMatchingCriteriaSchema(), + Set: schema.HashResource(EventarcTriggerMatchingCriteriaSchema()), + }, + + "name": { + Type: schema.TypeString, + Required: true, + Description: "Required. The resource name of the trigger. Must be unique within the location of the project and must be in `projects/{project}/locations/{location}/triggers/{trigger}` format.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. User labels attached to the triggers that can be used to group resources.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The IAM service account email associated with the trigger. The service account represents the identity of the trigger. The principal who calls this API must have `iam.serviceAccounts.actAs` permission in the service account. See https://cloud.google.com/iam/docs/understanding-service-accounts?hl=en#sa_common for more information. For Cloud Run destinations, this service account is used to generate identity tokens when invoking the service. See https://cloud.google.com/run/docs/triggering/pubsub-push#create-service-account for information on how to invoke authenticated Cloud Run services. In order to create Audit Log triggers, the service account should also have the `roles/eventarc.eventReceiver` IAM role.", + }, + + "transport": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional.
In order to deliver messages, Eventarc may use other GCP products as a transport intermediary. This field contains a reference to that transport intermediary. This information can be used for debugging purposes.", + MaxItems: 1, + Elem: EventarcTriggerTransportSchema(), + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The creation time.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. This checksum is computed by the server based on the value of other fields, and may be sent only on create requests to ensure the client has an up-to-date value before proceeding.", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Server-assigned unique identifier for the trigger. The value is a UUID4 string and is guaranteed to remain unchanged until the resource is deleted.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The last-modified time.", + }, + }, + } +} + +func EventarcTriggerDestinationSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloud_function": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The Cloud Function resource name. Only Cloud Functions V2 is supported. Format: projects/{project}/locations/{location}/functions/{function}", + }, + + "cloud_run_service": { + Type: schema.TypeList, + Optional: true, + Description: "Cloud Run fully-managed service that receives the events. The service should be running in the same project as the trigger.", + MaxItems: 1, + Elem: EventarcTriggerDestinationCloudRunServiceSchema(), + }, + }, + } +} + +func EventarcTriggerDestinationCloudRunServiceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Required. The name of the Cloud Run service being addressed. See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services. Only services located in the same project as the trigger object can be addressed.", + }, + + "path": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. The relative path on the Cloud Run service the events should be sent to. The value must conform to the definition of URI path segment (section 3.3 of RFC2396). Examples: \"/route\", \"route\", \"route/subroute\".", + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Required. The region the Cloud Run service is deployed in.", + }, + }, + } +} + +func EventarcTriggerMatchingCriteriaSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute": { + Type: schema.TypeString, + Required: true, + Description: "Required. The name of a CloudEvents attribute. Currently, only a subset of attributes are supported for filtering. All triggers MUST provide a filter for the 'type' attribute.", + }, + + "value": { + Type: schema.TypeString, + Required: true, + Description: "Required.
The value for the attribute.", + }, + }, + } +} + +func EventarcTriggerTransportSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The Pub/Sub topic and subscription used by Eventarc as a delivery intermediary.", + MaxItems: 1, + Elem: EventarcTriggerTransportPubsubSchema(), + }, + }, + } +} + +func EventarcTriggerTransportPubsubSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The name of the Pub/Sub topic created and managed by the Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME}`. You may set an existing topic for triggers of the type `google.cloud.pubsub.topic.v1.messagePublished` only. The topic you provide here will not be deleted by Eventarc at trigger deletion.", + }, + + "subscription": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Pub/Sub subscription created and managed by the Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}`.", + }, + }, + } +} + +func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &eventarc.Trigger{ + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyTrigger(context.Background(), obj, createDirective...)
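+ // createDirective limits this Apply to creation only; a dcl.DiffAfterApplyError from the DCL is logged below rather than treated as a create failure.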
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Trigger: %s", err) + } + + log.Printf("[DEBUG] Finished creating Trigger %q: %#v", d.Id(), res) + + return resourceEventarcTriggerRead(d, meta) +} + +func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &eventarc.Trigger{ + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetTrigger(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("EventarcTrigger %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("destination", flattenEventarcTriggerDestination(res.Destination)); err != nil { + return fmt.Errorf("error setting destination in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("matching_criteria", flattenEventarcTriggerMatchingCriteriaArray(res.MatchingCriteria)); err != nil { + return fmt.Errorf("error setting matching_criteria in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("service_account", res.ServiceAccount); err != nil { + return fmt.Errorf("error setting service_account in state: %s", err) + } + if err = d.Set("transport", flattenEventarcTriggerTransport(res.Transport)); err != nil { + return fmt.Errorf("error setting transport in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func 
resourceEventarcTriggerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &eventarc.Trigger{ + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyTrigger(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Trigger: %s", err) + } + + log.Printf("[DEBUG] Finished updating Trigger %q: %#v", d.Id(), res) + + return resourceEventarcTriggerRead(d, meta) +} + +func resourceEventarcTriggerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &eventarc.Trigger{ + Destination: expandEventarcTriggerDestination(d.Get("destination")), + Location: dcl.String(d.Get("location").(string)), + MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), + Name: dcl.String(d.Get("name").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + ServiceAccount: dcl.String(d.Get("service_account").(string)), + Transport: expandEventarcTriggerTransport(d.Get("transport")), + } + + log.Printf("[DEBUG] Deleting Trigger %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteTrigger(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Trigger: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Trigger %q", d.Id()) + return nil +} + +func resourceEventarcTriggerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/triggers/(?P<name>[^/]+)",
"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandEventarcTriggerDestination(o interface{}) *eventarc.TriggerDestination { + if o == nil { + return eventarc.EmptyTriggerDestination + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return eventarc.EmptyTriggerDestination + } + obj := objArr[0].(map[string]interface{}) + return &eventarc.TriggerDestination{ + CloudFunction: dcl.String(obj["cloud_function"].(string)), + CloudRunService: expandEventarcTriggerDestinationCloudRunService(obj["cloud_run_service"]), + } +} + +func flattenEventarcTriggerDestination(obj *eventarc.TriggerDestination) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cloud_function": obj.CloudFunction, + "cloud_run_service": flattenEventarcTriggerDestinationCloudRunService(obj.CloudRunService), + } + + return []interface{}{transformed} + +} + +func expandEventarcTriggerDestinationCloudRunService(o interface{}) *eventarc.TriggerDestinationCloudRunService { + if o == nil { + return eventarc.EmptyTriggerDestinationCloudRunService + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return eventarc.EmptyTriggerDestinationCloudRunService + } + obj := objArr[0].(map[string]interface{}) + return &eventarc.TriggerDestinationCloudRunService{ + Service: dcl.String(obj["service"].(string)), + Path: dcl.String(obj["path"].(string)), + Region: dcl.StringOrNil(obj["region"].(string)), + } +} + +func flattenEventarcTriggerDestinationCloudRunService(obj *eventarc.TriggerDestinationCloudRunService) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "service": obj.Service, + "path": obj.Path, + "region": obj.Region, + } + + return []interface{}{transformed} + +} +func expandEventarcTriggerMatchingCriteriaArray(o interface{}) []eventarc.TriggerMatchingCriteria { + if o == nil { + return make([]eventarc.TriggerMatchingCriteria, 0) + } + + o = o.(*schema.Set).List() + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]eventarc.TriggerMatchingCriteria, 0) + } + + items := make([]eventarc.TriggerMatchingCriteria, 0, len(objs)) + for _, item := range objs { + i := expandEventarcTriggerMatchingCriteria(item) + items = append(items, *i) + } + + return items +} + +func expandEventarcTriggerMatchingCriteria(o interface{}) *eventarc.TriggerMatchingCriteria { + if o == nil { + return eventarc.EmptyTriggerMatchingCriteria + } + + obj := o.(map[string]interface{}) + return &eventarc.TriggerMatchingCriteria{ + Attribute: dcl.String(obj["attribute"].(string)), + Value: dcl.String(obj["value"].(string)), + } +} + +func flattenEventarcTriggerMatchingCriteriaArray(objs []eventarc.TriggerMatchingCriteria) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenEventarcTriggerMatchingCriteria(&item) + items = append(items, i) + } + + return items +} + +func flattenEventarcTriggerMatchingCriteria(obj *eventarc.TriggerMatchingCriteria) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "attribute": obj.Attribute, + "value": obj.Value, 
+ } + + return transformed + +} + +func expandEventarcTriggerTransport(o interface{}) *eventarc.TriggerTransport { + if o == nil { + return nil + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return nil + } + obj := objArr[0].(map[string]interface{}) + return &eventarc.TriggerTransport{ + Pubsub: expandEventarcTriggerTransportPubsub(obj["pubsub"]), + } +} + +func flattenEventarcTriggerTransport(obj *eventarc.TriggerTransport) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "pubsub": flattenEventarcTriggerTransportPubsub(obj.Pubsub), + } + + return []interface{}{transformed} + +} + +func expandEventarcTriggerTransportPubsub(o interface{}) *eventarc.TriggerTransportPubsub { + if o == nil { + return eventarc.EmptyTriggerTransportPubsub + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return eventarc.EmptyTriggerTransportPubsub + } + obj := objArr[0].(map[string]interface{}) + return &eventarc.TriggerTransportPubsub{ + Topic: dcl.String(obj["topic"].(string)), + } +} + +func flattenEventarcTriggerTransportPubsub(obj *eventarc.TriggerTransportPubsub) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "topic": obj.Topic, + "subscription": obj.Subscription, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_eventarc_trigger_generated_test.go b/google/resource_eventarc_trigger_generated_test.go new file mode 100644 index 00000000000..78534ffa6e1 --- /dev/null +++ b/google/resource_eventarc_trigger_generated_test.go @@ -0,0 +1,321 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccEventarcTrigger_BasicHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckEventarcTriggerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccEventarcTrigger_BasicHandWritten(context), + }, + { + ResourceName: "google_eventarc_trigger.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEventarcTrigger_BasicHandWrittenUpdate0(context), + }, + { + ResourceName: "google_eventarc_trigger.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEventarcTrigger_BasicHandWrittenUpdate1(context), + }, + { + ResourceName: "google_eventarc_trigger.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccEventarcTrigger_BasicHandWritten(context map[string]interface{}) string { + return Nprintf(` +resource "google_eventarc_trigger" "primary" { + name = "tf-test-name%{random_suffix}" + location = "europe-west1" + matching_criteria { + attribute = "type" + value = "google.cloud.pubsub.topic.v1.messagePublished" + } + destination { + cloud_run_service { + service = google_cloud_run_service.default.name + region = "europe-west1" + } + } + labels = { + foo = "bar" + } +} + +resource "google_pubsub_topic" "foo" { + name = "tf-test-topic%{random_suffix}" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-eventarc-service%{random_suffix}" + location = "europe-west1" + + metadata { + namespace = "%{project_name}" + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + args = ["arrgs"] + } + container_concurrency = 50 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +`, context) +} + +func testAccEventarcTrigger_BasicHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_eventarc_trigger" "primary" { + name = "tf-test-name%{random_suffix}" + location = "europe-west1" + matching_criteria { + attribute = "type" + value = "google.cloud.pubsub.topic.v1.messagePublished" + } + destination { + cloud_run_service { + service = google_cloud_run_service.default.name + region = "europe-west1" + } + } + transport { + pubsub { + topic = google_pubsub_topic.foo.id + } + } +} + +resource "google_pubsub_topic" "foo" { + name = "tf-test-topic%{random_suffix}" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-eventarc-service%{random_suffix}" + location = "europe-west1" + + metadata { + namespace = "%{project_name}" + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + args = ["arrgs"] + } + container_concurrency = 50 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} 
+ +resource "google_cloud_run_service" "default2" { + name = "tf-test-eventarc-service%{random_suffix}2" + location = "europe-north1" + + metadata { + namespace = "%{project_name}" + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + args = ["arrgs"] + } + container_concurrency = 50 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +`, context) +} + +func testAccEventarcTrigger_BasicHandWrittenUpdate1(context map[string]interface{}) string { + return Nprintf(` +resource "google_eventarc_trigger" "primary" { + name = "tf-test-name%{random_suffix}" + location = "europe-west1" + matching_criteria { + attribute = "type" + value = "google.cloud.pubsub.topic.v1.messagePublished" + } + destination { + cloud_run_service { + service = google_cloud_run_service.default2.name + region = "europe-north1" + } + } + transport { + pubsub { + topic = google_pubsub_topic.foo.id + } + } + labels = { + foo = "bar" + } + service_account = google_service_account.eventarc-sa.email +} + +resource "google_service_account" "eventarc-sa" { + account_id = "tf-test-sa%{random_suffix}" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "foo" { + name = "tf-test-topic%{random_suffix}" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-eventarc-service%{random_suffix}" + location = "europe-west1" + + metadata { + namespace = "%{project_name}" + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + args = ["arrgs"] + } + container_concurrency = 50 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +resource "google_cloud_run_service" "default2" { + name = "tf-test-eventarc-service%{random_suffix}2" + location = "europe-north1" + + metadata { + namespace = "%{project_name}" + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + args = ["arrgs"] + } + container_concurrency = 50 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +`, context) +} + +func testAccCheckEventarcTriggerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_eventarc_trigger" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &eventarc.Trigger{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + ServiceAccount: dcl.String(rs.Primary.Attributes["service_account"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLEventarcClient(config, config.userAgent, billingProject, 0) + _, err := client.GetTrigger(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_eventarc_trigger still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_eventarc_trigger_sweeper_test.go b/google/resource_eventarc_trigger_sweeper_test.go new file mode 100644 index 00000000000..e55f689c4bb --- /dev/null +++ b/google/resource_eventarc_trigger_sweeper_test.go @@ -0,0 +1,71 @@ +// 
---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("EventarcTrigger", &resource.Sweeper{ + Name: "EventarcTrigger", + F: testSweepEventarcTrigger, + }) +} + +func testSweepEventarcTrigger(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for EventarcTrigger") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLEventarcClient(config, config.userAgent, "", 0) + err = client.DeleteAllTrigger(context.Background(), d["project"], d["location"], isDeletableEventarcTrigger) + if err != nil { + return err + } + return nil +} + +func isDeletableEventarcTrigger(r *eventarc.Trigger) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_network_connectivity_hub.go b/google/resource_network_connectivity_hub.go new file mode 100644 index 00000000000..31831b67ad5 --- /dev/null +++ b/google/resource_network_connectivity_hub.go @@ -0,0 +1,369 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" +) + +func resourceNetworkConnectivityHub() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkConnectivityHubCreate, + Read: resourceNetworkConnectivityHubRead, + Update: resourceNetworkConnectivityHubUpdate, + Delete: resourceNetworkConnectivityHubDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkConnectivityHubImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Immutable. The name of the hub. Hub names must be unique. They use the following form: `projects/{project_number}/locations/global/hubs/{hub_id}`", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "An optional description of the hub.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time the hub was created.", + }, + + "routing_vpcs": { + Type: schema.TypeList, + Computed: true, + Description: "The VPC network associated with this hub's spokes. All of the VPN tunnels, VLAN attachments, and router appliance instances referenced by this hub's spokes must belong to this VPC network. This field is read-only. Network Connectivity Center automatically populates it based on the set of spokes attached to the hub.", + Elem: NetworkConnectivityHubRoutingVpcsSchema(), + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current lifecycle state of this hub. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING", + }, + + "unique_id": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The Google-generated UUID for the hub. This value is unique across all hub resources. If a hub is deleted and another with the same name is created, the new hub is assigned a different unique_id.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. 
The time the hub was last updated.", + }, + }, + } +} + +func NetworkConnectivityHubRoutingVpcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Computed: true, + Description: "The URI of the VPC network.", + }, + }, + } +} + +func resourceNetworkConnectivityHubCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Hub{ + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyHub(context.Background(), obj, createDirective...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Hub: %s", err) + } + + log.Printf("[DEBUG] Finished creating Hub %q: %#v", d.Id(), res) + + return resourceNetworkConnectivityHubRead(d, meta) +} + +func resourceNetworkConnectivityHubRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Hub{ + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetHub(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("NetworkConnectivityHub %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return 
fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("routing_vpcs", flattenNetworkConnectivityHubRoutingVpcsArray(res.RoutingVpcs)); err != nil { + return fmt.Errorf("error setting routing_vpcs in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("unique_id", res.UniqueId); err != nil { + return fmt.Errorf("error setting unique_id in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceNetworkConnectivityHubUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Hub{ + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyHub(context.Background(), obj, directive...) 
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Hub: %s", err) + } + + log.Printf("[DEBUG] Finished updating Hub %q: %#v", d.Id(), res) + + return resourceNetworkConnectivityHubRead(d, meta) +} + +func resourceNetworkConnectivityHubDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Hub{ + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Hub %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteHub(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Hub: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Hub %q", d.Id()) + return nil +} + +func resourceNetworkConnectivityHubImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/global/hubs/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/global/hubs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNetworkConnectivityHubRoutingVpcsArray(objs []networkconnectivity.HubRoutingVpcs) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenNetworkConnectivityHubRoutingVpcs(&item) + items = append(items, i) + } + + return items +} + +func flattenNetworkConnectivityHubRoutingVpcs(obj *networkconnectivity.HubRoutingVpcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "uri": obj.Uri, + } + + return transformed + +} diff --git a/google/resource_network_connectivity_hub_generated_test.go b/google/resource_network_connectivity_hub_generated_test.go new file mode 100644 index 00000000000..29845e60269 --- /dev/null +++ b/google/resource_network_connectivity_hub_generated_test.go @@ -0,0 +1,131 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library).
+// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccNetworkConnectivityHub_BasicHub(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkConnectivityHubDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkConnectivityHub_BasicHub(context), + }, + { + ResourceName: "google_network_connectivity_hub.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkConnectivityHub_BasicHubUpdate0(context), + }, + { + ResourceName: "google_network_connectivity_hub.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkConnectivityHub_BasicHub(context map[string]interface{}) string { + return Nprintf(` +resource "google_network_connectivity_hub" "primary" { + name = "tf-test-hub%{random_suffix}" + description = "A sample hub" + + labels = { + label-one = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccNetworkConnectivityHub_BasicHubUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_network_connectivity_hub" "primary" { + name = "tf-test-hub%{random_suffix}" + description = "An updated sample hub" + + labels = { + label-two = "value-one" + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckNetworkConnectivityHubDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_network_connectivity_hub" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &networkconnectivity.Hub{ + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: networkconnectivity.HubStateEnumRef(rs.Primary.Attributes["state"]), + UniqueId: dcl.StringOrNil(rs.Primary.Attributes["unique_id"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLNetworkConnectivityClient(config, config.userAgent, billingProject, 0) + _, err := client.GetHub(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_network_connectivity_hub still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_network_connectivity_hub_sweeper_test.go
b/google/resource_network_connectivity_hub_sweeper_test.go new file mode 100644 index 00000000000..6bdcd3ed4e3 --- /dev/null +++ b/google/resource_network_connectivity_hub_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("NetworkConnectivityHub", &resource.Sweeper{ + Name: "NetworkConnectivityHub", + F: testSweepNetworkConnectivityHub, + }) +} + +func testSweepNetworkConnectivityHub(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for NetworkConnectivityHub") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLNetworkConnectivityClient(config, config.userAgent, "", 0) + err = client.DeleteAllHub(context.Background(), d["project"], isDeletableNetworkConnectivityHub) + if err != nil { + return err + } + return nil +} + +func isDeletableNetworkConnectivityHub(r *networkconnectivity.Hub) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_network_connectivity_spoke.go b/google/resource_network_connectivity_spoke.go new file mode 100644 index 00000000000..24e8f4e0b47 --- /dev/null +++ b/google/resource_network_connectivity_spoke.go @@ -0,0 +1,626 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" +) + +func resourceNetworkConnectivitySpoke() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkConnectivitySpokeCreate, + Read: resourceNetworkConnectivitySpokeRead, + Update: resourceNetworkConnectivitySpokeUpdate, + Delete: resourceNetworkConnectivitySpokeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceNetworkConnectivitySpokeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "hub": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Immutable. The URI of the hub that this spoke is attached to.", + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Immutable. The name of the spoke. Spoke names must be unique.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "An optional description of the spoke.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "linked_interconnect_attachments": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. 
Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes.", + MaxItems: 1, + Elem: NetworkConnectivitySpokeLinkedInterconnectAttachmentsSchema(), + ConflictsWith: []string{"linked_vpn_tunnels", "linked_router_appliance_instances"}, + }, + + "linked_router_appliance_instances": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The URIs of linked Router appliance resources", + MaxItems: 1, + Elem: NetworkConnectivitySpokeLinkedRouterApplianceInstancesSchema(), + ConflictsWith: []string{"linked_vpn_tunnels", "linked_interconnect_attachments"}, + }, + + "linked_vpn_tunnels": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "The URIs of linked VPN tunnel resources", + MaxItems: 1, + Elem: NetworkConnectivitySpokeLinkedVpnTunnelsSchema(), + ConflictsWith: []string{"linked_interconnect_attachments", "linked_router_appliance_instances"}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time the spoke was created.", + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The current lifecycle state of this spoke. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING", + }, + + "unique_id": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time the spoke was last updated.", + }, + }, + } +} + +func NetworkConnectivitySpokeLinkedInterconnectAttachmentsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "site_to_site_data_transfer": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", + }, + + "uris": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The URIs of linked interconnect attachment resources", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func NetworkConnectivitySpokeLinkedRouterApplianceInstancesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instances": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The list of router appliance instances", + Elem: NetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesSchema(), + }, + + "site_to_site_data_transfer": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: "A value that controls whether site-to-site data transfer is enabled for these resources. 
Note that data transfer is available only in supported locations.", + }, + }, + } +} + +func NetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The IP address on the VM to use for peering.", + }, + + "virtual_machine": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The URI of the virtual machine resource", + }, + }, + } +} + +func NetworkConnectivitySpokeLinkedVpnTunnelsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "site_to_site_data_transfer": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: "A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.", + }, + + "uris": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "The URIs of linked VPN tunnel resources.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceNetworkConnectivitySpokeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Spoke{ + Hub: dcl.String(d.Get("hub").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), + LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), + LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplySpoke(context.Background(), obj, createDirective...) 
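The `expandNetworkConnectivitySpoke*` calls used to build `obj` above all follow one convention for `MaxItems: 1` blocks: Terraform hands the block over as a `[]interface{}` holding at most one `map[string]interface{}`, and an absent block is mapped to the DCL's `Empty*` sentinel rather than `nil`, so the DCL can distinguish an unset block from one explicitly set to zero values. A toy sketch of that shape, with the hypothetical `toy*` names standing in for the generated `networkconnectivity` structs:

```go
package google

// toyLinkedVpnTunnels is a hypothetical stand-in for a generated DCL struct
// such as networkconnectivity.SpokeLinkedVpnTunnels.
type toyLinkedVpnTunnels struct {
	SiteToSiteDataTransfer bool
	Uris                   []string
}

// emptyToyLinkedVpnTunnels plays the role of the DCL's Empty* sentinels.
var emptyToyLinkedVpnTunnels = &toyLinkedVpnTunnels{}

func expandToyLinkedVpnTunnels(o interface{}) *toyLinkedVpnTunnels {
	objArr, ok := o.([]interface{})
	if !ok || len(objArr) == 0 {
		// Block absent from the configuration: return the sentinel, not nil.
		return emptyToyLinkedVpnTunnels
	}
	// MaxItems: 1 guarantees at most one element.
	obj := objArr[0].(map[string]interface{})
	uris := make([]string, 0)
	for _, u := range obj["uris"].([]interface{}) {
		uris = append(uris, u.(string))
	}
	return &toyLinkedVpnTunnels{
		SiteToSiteDataTransfer: obj["site_to_site_data_transfer"].(bool),
		Uris:                   uris,
	}
}
```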
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Spoke: %s", err) + } + + log.Printf("[DEBUG] Finished creating Spoke %q: %#v", d.Id(), res) + + return resourceNetworkConnectivitySpokeRead(d, meta) +} + +func resourceNetworkConnectivitySpokeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Spoke{ + Hub: dcl.String(d.Get("hub").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), + LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), + LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetSpoke(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("NetworkConnectivitySpoke %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("hub", res.Hub); err != nil { + return fmt.Errorf("error setting hub in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("linked_interconnect_attachments", flattenNetworkConnectivitySpokeLinkedInterconnectAttachments(res.LinkedInterconnectAttachments)); err != nil { + return fmt.Errorf("error setting linked_interconnect_attachments in state: %s", err) + } + if err = d.Set("linked_router_appliance_instances", flattenNetworkConnectivitySpokeLinkedRouterApplianceInstances(res.LinkedRouterApplianceInstances)); err != nil { + return fmt.Errorf("error setting linked_router_appliance_instances in state: %s", err) + } + if err = d.Set("linked_vpn_tunnels", flattenNetworkConnectivitySpokeLinkedVpnTunnels(res.LinkedVpnTunnels)); err != nil { + return fmt.Errorf("error setting linked_vpn_tunnels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", 
res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("state", res.State); err != nil { + return fmt.Errorf("error setting state in state: %s", err) + } + if err = d.Set("unique_id", res.UniqueId); err != nil { + return fmt.Errorf("error setting unique_id in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", err) + } + + return nil +} +func resourceNetworkConnectivitySpokeUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Spoke{ + Hub: dcl.String(d.Get("hub").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), + LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), + LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplySpoke(context.Background(), obj, directive...) 
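The Read functions above write every attribute back to state through individually wrapped `d.Set` calls, one `if err = d.Set(...)` per field. A hypothetical condensation of that repetition into a table-driven helper, with the same error messages; the `setAll` name is illustrative only, and the generated code deliberately spells each call out:

```go
package google

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// setAll writes each named value into Terraform state and wraps the first
// failure in the same "error setting X in state" message the generated
// Read functions use.
func setAll(d *schema.ResourceData, fields map[string]interface{}) error {
	for name, value := range fields {
		if err := d.Set(name, value); err != nil {
			return fmt.Errorf("error setting %s in state: %s", name, err)
		}
	}
	return nil
}
```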
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Spoke: %s", err) + } + + log.Printf("[DEBUG] Finished updating Spoke %q: %#v", d.Id(), res) + + return resourceNetworkConnectivitySpokeRead(d, meta) +} + +func resourceNetworkConnectivitySpokeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &networkconnectivity.Spoke{ + Hub: dcl.String(d.Get("hub").(string)), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + Labels: checkStringMap(d.Get("labels")), + LinkedInterconnectAttachments: expandNetworkConnectivitySpokeLinkedInterconnectAttachments(d.Get("linked_interconnect_attachments")), + LinkedRouterApplianceInstances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(d.Get("linked_router_appliance_instances")), + LinkedVpnTunnels: expandNetworkConnectivitySpokeLinkedVpnTunnels(d.Get("linked_vpn_tunnels")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting Spoke %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLNetworkConnectivityClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteSpoke(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Spoke: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Spoke %q", d.Id()) + return nil +} + +func resourceNetworkConnectivitySpokeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/spokes/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/spokes/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandNetworkConnectivitySpokeLinkedInterconnectAttachments(o interface{}) *networkconnectivity.SpokeLinkedInterconnectAttachments { + if o == nil { + return networkconnectivity.EmptySpokeLinkedInterconnectAttachments + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return networkconnectivity.EmptySpokeLinkedInterconnectAttachments + } + obj := objArr[0].(map[string]interface{}) + return &networkconnectivity.SpokeLinkedInterconnectAttachments{ + SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), + Uris: expandStringArray(obj["uris"]), + } +} + +func flattenNetworkConnectivitySpokeLinkedInterconnectAttachments(obj *networkconnectivity.SpokeLinkedInterconnectAttachments) interface{} { + if obj == nil || obj.Empty()
{ + return nil + } + transformed := map[string]interface{}{ + "site_to_site_data_transfer": obj.SiteToSiteDataTransfer, + "uris": obj.Uris, + } + + return []interface{}{transformed} + +} + +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstances(o interface{}) *networkconnectivity.SpokeLinkedRouterApplianceInstances { + if o == nil { + return networkconnectivity.EmptySpokeLinkedRouterApplianceInstances + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return networkconnectivity.EmptySpokeLinkedRouterApplianceInstances + } + obj := objArr[0].(map[string]interface{}) + return &networkconnectivity.SpokeLinkedRouterApplianceInstances{ + Instances: expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(obj["instances"]), + SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), + } +} + +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstances(obj *networkconnectivity.SpokeLinkedRouterApplianceInstances) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "instances": flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(obj.Instances), + "site_to_site_data_transfer": obj.SiteToSiteDataTransfer, + } + + return []interface{}{transformed} + +} +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(o interface{}) []networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances { + if o == nil { + return make([]networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances, 0) + } + + items := make([]networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances, 0, len(objs)) + for _, item := range objs { + i := expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(item) + items = append(items, *i) + } + + return items +} + +func expandNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(o interface{}) *networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances { + if o == nil { + return networkconnectivity.EmptySpokeLinkedRouterApplianceInstancesInstances + } + + obj := o.(map[string]interface{}) + return &networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances{ + IPAddress: dcl.String(obj["ip_address"].(string)), + VirtualMachine: dcl.String(obj["virtual_machine"].(string)), + } +} + +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstancesArray(objs []networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(&item) + items = append(items, i) + } + + return items +} + +func flattenNetworkConnectivitySpokeLinkedRouterApplianceInstancesInstances(obj *networkconnectivity.SpokeLinkedRouterApplianceInstancesInstances) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "ip_address": obj.IPAddress, + "virtual_machine": obj.VirtualMachine, + } + + return transformed + +} + +func expandNetworkConnectivitySpokeLinkedVpnTunnels(o interface{}) *networkconnectivity.SpokeLinkedVpnTunnels { + if o == nil { + return networkconnectivity.EmptySpokeLinkedVpnTunnels + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return networkconnectivity.EmptySpokeLinkedVpnTunnels + } + obj := 
objArr[0].(map[string]interface{}) + return &networkconnectivity.SpokeLinkedVpnTunnels{ + SiteToSiteDataTransfer: dcl.Bool(obj["site_to_site_data_transfer"].(bool)), + Uris: expandStringArray(obj["uris"]), + } +} + +func flattenNetworkConnectivitySpokeLinkedVpnTunnels(obj *networkconnectivity.SpokeLinkedVpnTunnels) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "site_to_site_data_transfer": obj.SiteToSiteDataTransfer, + "uris": obj.Uris, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_network_connectivity_spoke_generated_test.go b/google/resource_network_connectivity_spoke_generated_test.go new file mode 100644 index 00000000000..fa383a44cc2 --- /dev/null +++ b/google/resource_network_connectivity_spoke_generated_test.go @@ -0,0 +1,227 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccNetworkConnectivitySpoke_RouterApplianceHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + "zone": getTestZoneFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkConnectivitySpokeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkConnectivitySpoke_RouterApplianceHandWritten(context), + }, + { + ResourceName: "google_network_connectivity_spoke.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkConnectivitySpoke_RouterApplianceHandWrittenUpdate0(context), + }, + { + ResourceName: "google_network_connectivity_spoke.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkConnectivitySpoke_RouterApplianceHandWritten(context map[string]interface{}) string { + return Nprintf(` + +resource "google_compute_network" "network" { + name = "tf-test-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "tf-test-subnet%{random_suffix}" + ip_cidr_range = "10.0.0.0/28" + region = "%{region}" + network = google_compute_network.network.self_link +} + +resource "google_compute_instance" "instance" { + name = "tf-test-instance%{random_suffix}" + machine_type = "e2-medium" + 
can_ip_forward = true + zone = "%{zone}" + + boot_disk { + initialize_params { + image = "projects/debian-cloud/global/images/debian-10-buster-v20210817" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork.name + network_ip = "10.0.0.2" + access_config { + network_tier = "PREMIUM" + } + } +} + +resource "google_network_connectivity_hub" "basic_hub" { + name = "tf-test-hub%{random_suffix}" + description = "A sample hub" + labels = { + label-two = "value-one" + } +} + +resource "google_network_connectivity_spoke" "primary" { + name = "tf-test-name%{random_suffix}" + location = "%{region}" + description = "A sample spoke with a linked router appliance instance" + labels = { + label-one = "value-one" + } + hub = google_network_connectivity_hub.basic_hub.id + linked_router_appliance_instances { + instances { + virtual_machine = google_compute_instance.instance.self_link + ip_address = "10.0.0.2" + } + site_to_site_data_transfer = true + } +} +`, context) +} + +func testAccNetworkConnectivitySpoke_RouterApplianceHandWrittenUpdate0(context map[string]interface{}) string { + return Nprintf(` + +resource "google_compute_network" "network" { + name = "tf-test-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "tf-test-subnet%{random_suffix}" + ip_cidr_range = "10.0.0.0/28" + region = "%{region}" + network = google_compute_network.network.self_link +} + +resource "google_compute_instance" "instance" { + name = "tf-test-instance%{random_suffix}" + machine_type = "e2-medium" + can_ip_forward = true + zone = "%{zone}" + + boot_disk { + initialize_params { + image = "projects/debian-cloud/global/images/debian-10-buster-v20210817" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork.name + network_ip = "10.0.0.2" + access_config { + network_tier = "PREMIUM" + } + } +} + +resource "google_network_connectivity_hub" "basic_hub" { + name = "tf-test-hub%{random_suffix}" + description = "A sample hub" + labels = { + label-two = "value-one" + } +} + +resource "google_network_connectivity_spoke" "primary" { + name = "tf-test-name%{random_suffix}" + location = "%{region}" + description = "An UPDATED sample spoke with a linked router appliance instance" + labels = { + label-two = "value-two" + } + hub = google_network_connectivity_hub.basic_hub.id + linked_router_appliance_instances { + instances { + virtual_machine = google_compute_instance.instance.self_link + ip_address = "10.0.0.2" + } + site_to_site_data_transfer = true + } +} +`, context) +} + +func testAccCheckNetworkConnectivitySpokeDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_network_connectivity_spoke" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &networkconnectivity.Spoke{ + Hub: dcl.String(rs.Primary.Attributes["hub"]), + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + State: networkconnectivity.SpokeStateEnumRef(rs.Primary.Attributes["state"]), +
UniqueId: dcl.StringOrNil(rs.Primary.Attributes["unique_id"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLNetworkConnectivityClient(config, config.userAgent, billingProject, 0) + _, err := client.GetSpoke(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_network_connectivity_spoke still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_network_connectivity_spoke_sweeper_test.go b/google/resource_network_connectivity_spoke_sweeper_test.go new file mode 100644 index 00000000000..2234fb029a1 --- /dev/null +++ b/google/resource_network_connectivity_spoke_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + networkconnectivity "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/networkconnectivity" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("NetworkConnectivitySpoke", &resource.Sweeper{ + Name: "NetworkConnectivitySpoke", + F: testSweepNetworkConnectivitySpoke, + }) +} + +func testSweepNetworkConnectivitySpoke(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for NetworkConnectivitySpoke") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Set up variables to be used for Delete arguments.
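+ // DeleteAllSpoke (called below) lists the spokes under the supplied project and location and deletes each one the filter function accepts; isDeletableNetworkConnectivitySpoke limits the sweep to resources whose names carry a test-style prefix such as "tf-test".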
+ d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLNetworkConnectivityClient(config, config.userAgent, "", 0) + err = client.DeleteAllSpoke(context.Background(), d["project"], d["location"], isDeletableNetworkConnectivitySpoke) + if err != nil { + return err + } + return nil +} + +func isDeletableNetworkConnectivitySpoke(r *networkconnectivity.Spoke) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_org_policy_policy.go b/google/resource_org_policy_policy.go new file mode 100644 index 00000000000..cde72c78201 --- /dev/null +++ b/google/resource_org_policy_policy.go @@ -0,0 +1,528 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + orgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" +) + +func resourceOrgPolicyPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceOrgPolicyPolicyCreate, + Read: resourceOrgPolicyPolicyRead, + Update: resourceOrgPolicyPolicyUpdate, + Delete: resourceOrgPolicyPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOrgPolicyPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Immutable. The resource name of the Policy. Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, \"projects/123/policies/compute.disableSerialPortAccess\". 
Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.", + }, + + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The parent of the resource.", + }, + + "spec": { + Type: schema.TypeList, + Optional: true, + Description: "Basic information about the Organization Policy.", + MaxItems: 1, + Elem: OrgPolicyPolicySpecSchema(), + }, + }, + } +} + +func OrgPolicyPolicySpecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "inherit_from_parent": { + Type: schema.TypeBool, + Optional: true, + Description: "Determines the inheritance behavior for this `Policy`. If `inherit_from_parent` is true, PolicyRules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this Policy becomes the new root for evaluation. This field can be set only for Policies which configure list constraints.", + }, + + "reset": { + Type: schema.TypeBool, + Optional: true, + Description: "Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific `Constraint` at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inherit_from_parent` must be set to false.", + }, + + "rules": { + Type: schema.TypeList, + Optional: true, + Description: "Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set `enforced` to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.", + Elem: OrgPolicyPolicySpecRulesSchema(), + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "An opaque tag indicating the current version of the `Policy`, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the `Policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current `Policy` to use when executing a read-modify-write loop. When the `Policy` is returned from a `GetEffectivePolicy` request, the `etag` will be unset.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time stamp at which this `Policy` was last updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that `Policy`.", + }, + }, + } +} + +func OrgPolicyPolicySpecRulesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all": { + Type: schema.TypeString, + Optional: true, + Description: "Setting this to \"TRUE\" means that all values are allowed. This field can be set only in Policies for list constraints.", + }, + + "condition": { + Type: schema.TypeList, + Optional: true, + Description: "A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr` must include from 1 to 10 subexpressions, joined by the \"||\" or \"&&\" operators. Each subexpression must be of the form \"resource.matchTag('tag_key_short_name', 'tag_value_short_name')\" 
or \"resource.matchTagId('tagKeys/key_id', 'tagValues/value_id')\". where key_name and value_name are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: \"resource.matchTag('123456789/environment, 'prod')\". or \"resource.matchTagId('tagKeys/123', 'tagValues/456')\".", + MaxItems: 1, + Elem: OrgPolicyPolicySpecRulesConditionSchema(), + }, + + "deny_all": { + Type: schema.TypeString, + Optional: true, + Description: "Setting this to true means that all values are denied. This field can be set only in Policies for list constraints.", + }, + + "enforce": { + Type: schema.TypeString, + Optional: true, + Description: "If `true`, then the `Policy` is enforced. If `false`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints.", + }, + + "values": { + Type: schema.TypeList, + Optional: true, + Description: "List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints.", + MaxItems: 1, + Elem: OrgPolicyPolicySpecRulesValuesSchema(), + }, + }, + } +} + +func OrgPolicyPolicySpecRulesConditionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + }, + + "expression": { + Type: schema.TypeString, + Optional: true, + Description: "Textual representation of an expression in Common Expression Language syntax.", + }, + + "location": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + }, + + "title": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow entering the expression.", + }, + }, + } +} + +func OrgPolicyPolicySpecRulesValuesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_values": { + Type: schema.TypeList, + Optional: true, + Description: "List of values allowed at this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "denied_values": { + Type: schema.TypeList, + Optional: true, + Description: "List of values denied at this resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceOrgPolicyPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &orgpolicy.Policy{ + Name: dcl.String(d.Get("name").(string)), + Parent: dcl.String(d.Get("parent").(string)), + Spec: expandOrgPolicyPolicySpec(d.Get("spec")), + } + + id, err := obj.ID() + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyPolicy(context.Background(), obj, createDirective...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Policy: %s", err) + } + + log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) + + return resourceOrgPolicyPolicyRead(d, meta) +} + +func resourceOrgPolicyPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &orgpolicy.Policy{ + Name: dcl.String(d.Get("name").(string)), + Parent: dcl.String(d.Get("parent").(string)), + Spec: expandOrgPolicyPolicySpec(d.Get("spec")), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetPolicy(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("OrgPolicyPolicy %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("parent", res.Parent); err != nil { + return fmt.Errorf("error setting parent in state: %s", err) + } + if err = d.Set("spec", flattenOrgPolicyPolicySpec(res.Spec)); err != nil { + return fmt.Errorf("error setting spec in state: %s", err) + } + + return nil +} +func 
resourceOrgPolicyPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &orgpolicy.Policy{ + Name: dcl.String(d.Get("name").(string)), + Parent: dcl.String(d.Get("parent").(string)), + Spec: expandOrgPolicyPolicySpec(d.Get("spec")), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyPolicy(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating Policy: %s", err) + } + + log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) + + return resourceOrgPolicyPolicyRead(d, meta) +} + +func resourceOrgPolicyPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := &orgpolicy.Policy{ + Name: dcl.String(d.Get("name").(string)), + Parent: dcl.String(d.Get("parent").(string)), + Spec: expandOrgPolicyPolicySpec(d.Get("spec")), + } + + log.Printf("[DEBUG] Deleting Policy %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeletePolicy(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting Policy: %s", err) + } + + log.Printf("[DEBUG] Finished deleting Policy %q", d.Id()) + return nil +} + +func resourceOrgPolicyPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + if err := resourceOrgPolicyPolicyCustomImport(d, config); err != nil { + return nil, fmt.Errorf("error encountered in import: %v", err) + } + + return []*schema.ResourceData{d}, nil +} + +func expandOrgPolicyPolicySpec(o interface{}) *orgpolicy.PolicySpec { + if o == nil { + return orgpolicy.EmptyPolicySpec + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return orgpolicy.EmptyPolicySpec + } + obj := objArr[0].(map[string]interface{}) + return &orgpolicy.PolicySpec{ + InheritFromParent: dcl.Bool(obj["inherit_from_parent"].(bool)), + Reset: dcl.Bool(obj["reset"].(bool)), + Rules: expandOrgPolicyPolicySpecRulesArray(obj["rules"]), + } +} + +func flattenOrgPolicyPolicySpec(obj *orgpolicy.PolicySpec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "inherit_from_parent": obj.InheritFromParent, + "reset": obj.Reset, + "rules": 
flattenOrgPolicyPolicySpecRulesArray(obj.Rules), + "etag": obj.Etag, + "update_time": obj.UpdateTime, + } + + return []interface{}{transformed} + +} +func expandOrgPolicyPolicySpecRulesArray(o interface{}) []orgpolicy.PolicySpecRules { + if o == nil { + return make([]orgpolicy.PolicySpecRules, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]orgpolicy.PolicySpecRules, 0) + } + + items := make([]orgpolicy.PolicySpecRules, 0, len(objs)) + for _, item := range objs { + i := expandOrgPolicyPolicySpecRules(item) + items = append(items, *i) + } + + return items +} + +func expandOrgPolicyPolicySpecRules(o interface{}) *orgpolicy.PolicySpecRules { + if o == nil { + return orgpolicy.EmptyPolicySpecRules + } + + obj := o.(map[string]interface{}) + return &orgpolicy.PolicySpecRules{ + AllowAll: expandEnumBool(obj["allow_all"].(string)), + Condition: expandOrgPolicyPolicySpecRulesCondition(obj["condition"]), + DenyAll: expandEnumBool(obj["deny_all"].(string)), + Enforce: expandEnumBool(obj["enforce"].(string)), + Values: expandOrgPolicyPolicySpecRulesValues(obj["values"]), + } +} + +func flattenOrgPolicyPolicySpecRulesArray(objs []orgpolicy.PolicySpecRules) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOrgPolicyPolicySpecRules(&item) + items = append(items, i) + } + + return items +} + +func flattenOrgPolicyPolicySpecRules(obj *orgpolicy.PolicySpecRules) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_all": flattenEnumBool(obj.AllowAll), + "condition": flattenOrgPolicyPolicySpecRulesCondition(obj.Condition), + "deny_all": flattenEnumBool(obj.DenyAll), + "enforce": flattenEnumBool(obj.Enforce), + "values": flattenOrgPolicyPolicySpecRulesValues(obj.Values), + } + + return transformed + +} + +func expandOrgPolicyPolicySpecRulesCondition(o interface{}) *orgpolicy.PolicySpecRulesCondition { + if o == nil { + return orgpolicy.EmptyPolicySpecRulesCondition + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return orgpolicy.EmptyPolicySpecRulesCondition + } + obj := objArr[0].(map[string]interface{}) + return &orgpolicy.PolicySpecRulesCondition{ + Description: dcl.String(obj["description"].(string)), + Expression: dcl.String(obj["expression"].(string)), + Location: dcl.String(obj["location"].(string)), + Title: dcl.String(obj["title"].(string)), + } +} + +func flattenOrgPolicyPolicySpecRulesCondition(obj *orgpolicy.PolicySpecRulesCondition) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "description": obj.Description, + "expression": obj.Expression, + "location": obj.Location, + "title": obj.Title, + } + + return []interface{}{transformed} + +} + +func expandOrgPolicyPolicySpecRulesValues(o interface{}) *orgpolicy.PolicySpecRulesValues { + if o == nil { + return orgpolicy.EmptyPolicySpecRulesValues + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return orgpolicy.EmptyPolicySpecRulesValues + } + obj := objArr[0].(map[string]interface{}) + return &orgpolicy.PolicySpecRulesValues{ + AllowedValues: expandStringArray(obj["allowed_values"]), + DeniedValues: expandStringArray(obj["denied_values"]), + } +} + +func flattenOrgPolicyPolicySpecRulesValues(obj *orgpolicy.PolicySpecRulesValues) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allowed_values": obj.AllowedValues, + "denied_values": obj.DeniedValues, + } 
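+ // Nested blocks declared with MaxItems: 1 are stored in state as single-element lists, which is why this flattener (like the others above) wraps the map in a one-item slice.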
+ + return []interface{}{transformed} + +} diff --git a/google/resource_org_policy_policy_generated_test.go b/google/resource_org_policy_policy_generated_test.go new file mode 100644 index 00000000000..7ae1f94e0bf --- /dev/null +++ b/google/resource_org_policy_policy_generated_test.go @@ -0,0 +1,379 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + orgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccOrgPolicyPolicy_EnforcePolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": getTestOrgFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_EnforcePolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} +func TestAccOrgPolicyPolicy_FolderPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": getTestOrgFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_FolderPolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + { + Config: testAccOrgPolicyPolicy_FolderPolicyUpdate0(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} +func TestAccOrgPolicyPolicy_OrganizationPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": getTestOrgFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_OrganizationPolicy(context), + }, + { + 
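+ // The import steps skip verifying "name" and the condition expression: per the name field's schema description, the API can echo names back in a normalized form (e.g. project number in place of project ID), and the CEL expression may likewise be server-normalized.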
ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + { + Config: testAccOrgPolicyPolicy_OrganizationPolicyUpdate0(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} +func TestAccOrgPolicyPolicy_ProjectPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": getTestOrgFromEnv(t), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_ProjectPolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + { + Config: testAccOrgPolicyPolicy_ProjectPolicyUpdate0(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} + +func testAccOrgPolicyPolicy_EnforcePolicy(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/iam.disableServiceAccountKeyUpload" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + enforce = "FALSE" + } + } +} + +resource "google_project" "basic" { + project_id = "tf-test-id%{random_suffix}" + name = "tf-test-id%{random_suffix}" + org_id = "%{org_id}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_FolderPolicy(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = true + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder%{random_suffix}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_FolderPolicyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = false + + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + title = "sample-condition" + } + + values { + allowed_values = ["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder%{random_suffix}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_OrganizationPolicy(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "organizations/%{org_id}/policies/gcp.detailedAuditLoggingMode" + parent = "organizations/%{org_id}" + + spec { + reset = true + } +} + + +`, context) +} + +func 
testAccOrgPolicyPolicy_OrganizationPolicyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "organizations/%{org_id}/policies/gcp.detailedAuditLoggingMode" + parent = "organizations/%{org_id}" + + spec { + reset = false + + rules { + enforce = "TRUE" + } + } +} + + +`, context) +} + +func testAccOrgPolicyPolicy_ProjectPolicy(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + location = "sample-location.log" + title = "sample-condition" + } + + values { + allowed_values = ["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "tf-test-id%{random_suffix}" + name = "tf-test-id%{random_suffix}" + org_id = "%{org_id}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_ProjectPolicyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A new sample condition for the policy" + expression = "false" + location = "new-sample-location.log" + title = "new-sample-condition" + } + + values { + allowed_values = ["projects/new-allowed-project"] + denied_values = ["projects/new-denied-project"] + } + } + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "tf-test-id%{random_suffix}" + name = "tf-test-id%{random_suffix}" + org_id = "%{org_id}" +} + + +`, context) +} + +func testAccCheckOrgPolicyPolicyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_org_policy_policy" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &orgpolicy.Policy{ + Name: dcl.String(rs.Primary.Attributes["name"]), + Parent: dcl.String(rs.Primary.Attributes["parent"]), + } + + client := NewDCLOrgPolicyClient(config, config.userAgent, billingProject, 0) + _, err := client.GetPolicy(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_org_policy_policy still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_os_config_os_policy_assignment.go b/google/resource_os_config_os_policy_assignment.go new file mode 100644 index 00000000000..94b6e07f122 --- /dev/null +++ b/google/resource_os_config_os_policy_assignment.go @@ -0,0 +1,3111 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). 
+// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + osconfig "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig" +) + +func resourceOsConfigOsPolicyAssignment() *schema.Resource { + return &schema.Resource{ + Create: resourceOsConfigOsPolicyAssignmentCreate, + Read: resourceOsConfigOsPolicyAssignmentRead, + Update: resourceOsConfigOsPolicyAssignmentUpdate, + Delete: resourceOsConfigOsPolicyAssignmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceOsConfigOsPolicyAssignmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "instance_filter": { + Type: schema.TypeList, + Required: true, + Description: "Required. Filter to select VMs.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentInstanceFilterSchema(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Resource name.", + }, + + "os_policies": { + Type: schema.TypeList, + Required: true, + Description: "Required. List of OS policies to be applied to the VMs.", + Elem: OsConfigOsPolicyAssignmentOSPoliciesSchema(), + }, + + "rollout": { + Type: schema.TypeList, + Required: true, + Description: "Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentRolloutSchema(), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "OS policy assignment description. Length of the description is limited to 1024 characters.", + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "baseline": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field.", + }, + + "deleted": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Indicates that this revision deletes the OS policy assignment.", + }, + + "etag": { + Type: schema.TypeString, + Computed: true, + Description: "The etag for this OS policy assignment. If this is provided on update, it must match the server's etag.", + }, + + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. 
Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING", + }, + + "revision_create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The timestamp that the revision was created.", + }, + + "revision_id": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The assignment revision ID. A new revision is committed whenever a rollout is triggered for an OS policy assignment.", + }, + + "rollout_state": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. OS policy assignment rollout state. Possible values: ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED", + }, + + "uid": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. Server generated unique id for the OS policy assignment resource.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentInstanceFilterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all": { + Type: schema.TypeBool, + Optional: true, + Description: "Target all VMs in the project. If true, no other criteria are permitted.", + }, + + "exclusion_labels": { + Type: schema.TypeList, + Optional: true, + Description: "List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM.", + Elem: OsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsSchema(), + }, + + "inclusion_labels": { + Type: schema.TypeList, + Optional: true, + Description: "List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM.", + Elem: OsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsSchema(), + }, + + "inventories": { + Type: schema.TypeList, + Optional: true, + Description: "List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories.", + Elem: OsConfigOsPolicyAssignmentInstanceFilterInventoriesSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func OsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func OsConfigOsPolicyAssignmentInstanceFilterInventoriesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "os_short_name": { + Type: schema.TypeString, + Required: true, + Description: "Required. The OS short name.", + }, + + "os_version": { + Type: schema.TypeString, + Optional: true, + Description: "The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. 
For example, to match all versions with a major version of `7`, specify the following value for this field: `7.*`. An empty string matches all OS versions.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: "Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment.", + }, + + "mode": { + Type: schema.TypeString, + Required: true, + Description: "Required. Policy mode. Possible values: MODE_UNSPECIFIED, VALIDATION, ENFORCEMENT", + }, + + "resource_groups": { + Type: schema.TypeList, + Required: true, + Description: "Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t. this policy. This behavior can be toggled by the flag `allow_no_resource_group_match`", + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsSchema(), + }, + + "allow_no_resource_group_match": { + Type: schema.TypeBool, + Optional: true, + Description: "This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Policy description. Length of the description is limited to 1024 characters.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resources": { + Type: schema.TypeList, + Required: true, + Description: "Required. List of resources configured for this resource group. The resources are executed in the exact order specified here.", + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesSchema(), + }, + + "inventory_filters": { + Type: schema.TypeList, + Optional: true, + Description: "List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos'. If the list is empty, this resource group will be applied to the target VM unconditionally.", + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: "Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. 
* Must be unique within the OS policy.", + }, + + "exec": { + Type: schema.TypeList, + Optional: true, + Description: "Exec resource", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSchema(), + }, + + "file": { + Type: schema.TypeList, + Optional: true, + Description: "File resource", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSchema(), + }, + + "pkg": { + Type: schema.TypeList, + Optional: true, + Description: "Package resource", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSchema(), + }, + + "repository": { + Type: schema.TypeList, + Optional: true, + Description: "Package repository resource", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "validate": { + Type: schema.TypeList, + Required: true, + Description: "Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates \"in desired state\", and exit code of 101 indicates \"not in desired state\". Any other exit code indicates a failure running validate.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSchema(), + }, + + "enforce": { + Type: schema.TypeList, + Optional: true, + Description: "What to run to bring this resource into the desired state. An exit code of 100 indicates \"success\", any other exit code indicates a failure running enforce.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interpreter": { + Type: schema.TypeString, + Required: true, + Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL", + }, + + "args": { + Type: schema.TypeList, + Optional: true, + Description: "Optional arguments to pass to the source during execution.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file": { + Type: schema.TypeList, + Optional: true, + Description: "A remote or local file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSchema(), + }, + + "output_file_path": { + Type: schema.TypeString, + Optional: true, + Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.", + }, + + "script": { + Type: schema.TypeString, + Optional: true, + Description: "An inline script. The size of the script is limited to 1024 characters.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", + }, + + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: "A Cloud Storage object.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSchema(), + }, + + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: "A local path within the VM to use.", + }, + + "remote": { + Type: schema.TypeList, + Optional: true, + Description: "A generic remote file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Required. Bucket of the Cloud Storage object.", + }, + + "object": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Cloud Storage object.", + }, + + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation number of the Cloud Storage object.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", + }, + + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: "SHA256 checksum of the remote file.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interpreter": { + Type: schema.TypeString, + Required: true, + Description: "Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL", + }, + + "args": { + Type: schema.TypeList, + Optional: true, + Description: "Optional arguments to pass to the source during execution.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file": { + Type: schema.TypeList, + Optional: true, + Description: "A remote or local file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSchema(), + }, + + "output_file_path": { + Type: schema.TypeString, + Optional: true, + Description: "Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes.", + }, + + "script": { + Type: schema.TypeString, + Optional: true, + Description: "An inline script. The size of the script is limited to 1024 characters.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", + }, + + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: "A Cloud Storage object.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSchema(), + }, + + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: "A local path within the VM to use.", + }, + + "remote": { + Type: schema.TypeList, + Optional: true, + Description: "A generic remote file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Required. Bucket of the Cloud Storage object.", + }, + + "object": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Cloud Storage object.", + }, + + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation number of the Cloud Storage object.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", + }, + + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: "SHA256 checksum of the remote file.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: "Required. The absolute path of the file within the VM.", + }, + + "state": { + Type: schema.TypeString, + Required: true, + Description: "Required. Desired state of the file. Possible values: OS_POLICY_COMPLIANCE_STATE_UNSPECIFIED, COMPLIANT, NON_COMPLIANT, UNKNOWN, NO_OS_POLICIES_APPLICABLE", + }, + + "content": { + Type: schema.TypeString, + Optional: true, + Description: "A file with this content. The size of the content is limited to 1024 characters.", + }, + + "file": { + Type: schema.TypeList, + Optional: true, + Description: "A remote or local source.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSchema(), + }, + + "permissions": { + Type: schema.TypeString, + Computed: true, + Description: "Consists of three octal digits which represent, in order, the permissions of the owner, group, and other users for the file (similarly to the numeric mode used in the linux chmod utility). Each digit represents a three-bit number, with the 4 bit corresponding to the read permission, the 2 bit to the write permission, and the 1 bit to the execute permission. Default behavior is 755. 
Below are some examples of permissions and their associated values: read, write, and execute: 7; read and execute: 5; read and write: 6; read only: 4", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified.", + }, + + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: "A Cloud Storage object.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSchema(), + }, + + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: "A local path within the VM to use.", + }, + + "remote": { + Type: schema.TypeList, + Optional: true, + Description: "A generic remote file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Required. Bucket of the Cloud Storage object.", + }, + + "object": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Cloud Storage object.", + }, + + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation number of the Cloud Storage object.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", + }, + + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: "SHA256 checksum of the remote file.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "desired_state": { + Type: schema.TypeString, + Required: true, + Description: "Required. The desired state the agent should maintain for this package. 
Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED", + }, + + "apt": { + Type: schema.TypeList, + Optional: true, + Description: "A package managed by Apt.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSchema(), + }, + + "deb": { + Type: schema.TypeList, + Optional: true, + Description: "A deb package file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSchema(), + }, + + "googet": { + Type: schema.TypeList, + Optional: true, + Description: "A package managed by GooGet.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSchema(), + }, + + "msi": { + Type: schema.TypeList, + Optional: true, + Description: "An MSI package.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSchema(), + }, + + "rpm": { + Type: schema.TypeList, + Optional: true, + Description: "An rpm package file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSchema(), + }, + + "yum": { + Type: schema.TypeList, + Optional: true, + Description: "A package managed by YUM.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSchema(), + }, + + "zypper": { + Type: schema.TypeList, + Optional: true, + Description: "A package managed by Zypper.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Required. Package name.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + Description: "Required. A deb package.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSchema(), + }, + + "pull_deps": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb`", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", + }, + + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: "A Cloud Storage object.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSchema(), + }, + + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: "A local path within the VM to use.", + }, + + "remote": { + Type: schema.TypeList, + Optional: true, + Description: "A generic remote file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Required. Bucket of the Cloud Storage object.", + }, + + "object": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Cloud Storage object.", + }, + + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation number of the Cloud Storage object.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", + }, + + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: "SHA256 checksum of the remote file.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Required. Package name.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + Description: "Required. The MSI package.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSchema(), + }, + + "properties": { + Type: schema.TypeList, + Optional: true, + Description: "Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", + }, + + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: "A Cloud Storage object.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSchema(), + }, + + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: "A local path within the VM to use.", + }, + + "remote": { + Type: schema.TypeList, + Optional: true, + Description: "A generic remote file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Required. Bucket of the Cloud Storage object.", + }, + + "object": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Cloud Storage object.", + }, + + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation number of the Cloud Storage object.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", + }, + + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: "SHA256 checksum of the remote file.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeList, + Required: true, + Description: "Required. An rpm package.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSchema(), + }, + + "pull_deps": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm`", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_insecure": { + Type: schema.TypeBool, + Optional: true, + Description: "Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. 
Cloud Storage: An object generation number must be specified.", + }, + + "gcs": { + Type: schema.TypeList, + Optional: true, + Description: "A Cloud Storage object.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSchema(), + }, + + "local_path": { + Type: schema.TypeString, + Optional: true, + Description: "A local path within the VM to use.", + }, + + "remote": { + Type: schema.TypeList, + Optional: true, + Description: "A generic remote file.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "Required. Bucket of the Cloud Storage object.", + }, + + "object": { + Type: schema.TypeString, + Required: true, + Description: "Required. Name of the Cloud Storage object.", + }, + + "generation": { + Type: schema.TypeInt, + Optional: true, + Description: "Generation number of the Cloud Storage object.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.", + }, + + "sha256_checksum": { + Type: schema.TypeString, + Optional: true, + Description: "SHA256 checksum of the remote file.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Required. Package name.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Required. 
Package name.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositorySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "apt": { + Type: schema.TypeList, + Optional: true, + Description: "An Apt Repository.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSchema(), + }, + + "goo": { + Type: schema.TypeList, + Optional: true, + Description: "A Goo Repository.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSchema(), + }, + + "yum": { + Type: schema.TypeList, + Optional: true, + Description: "A Yum Repository.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSchema(), + }, + + "zypper": { + Type: schema.TypeList, + Optional: true, + Description: "A Zypper Repository.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSchema(), + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_type": { + Type: schema.TypeString, + Required: true, + Description: "Required. Type of archive files in this repository. Possible values: ARCHIVE_TYPE_UNSPECIFIED, DEB, DEB_SRC", + }, + + "components": { + Type: schema.TypeList, + Required: true, + Description: "Required. List of components for this repository. Must contain at least one item.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "distribution": { + Type: schema.TypeString, + Required: true, + Description: "Required. Distribution of this repository.", + }, + + "uri": { + Type: schema.TypeString, + Required: true, + Description: "Required. URI for this repository.", + }, + + "gpg_key": { + Type: schema.TypeString, + Optional: true, + Description: "URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Required. The name of the repository.", + }, + + "url": { + Type: schema.TypeString, + Required: true, + Description: "Required. The url of the repository.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base_url": { + Type: schema.TypeString, + Required: true, + Description: "Required. The location of the repository directory.", + }, + + "id": { + Type: schema.TypeString, + Required: true, + Description: "Required. A one word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. 
This id is also used as the unique identifier when checking for resource conflicts.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "The display name of the repository.", + }, + + "gpg_keys": { + Type: schema.TypeList, + Optional: true, + Description: "URIs of GPG keys.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base_url": { + Type: schema.TypeString, + Required: true, + Description: "Required. The location of the repository directory.", + }, + + "id": { + Type: schema.TypeString, + Required: true, + Description: "Required. A one word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts.", + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: "The display name of the repository.", + }, + + "gpg_keys": { + Type: schema.TypeList, + Optional: true, + Description: "URIs of GPG keys.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func OsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "os_short_name": { + Type: schema.TypeString, + Required: true, + Description: "Required. The OS short name.", + }, + + "os_version": { + Type: schema.TypeString, + Optional: true, + Description: "The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify the following value for this field: `7.*`. An empty string matches all OS versions.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentRolloutSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disruption_budget": { + Type: schema.TypeList, + Required: true, + Description: "Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment.", + MaxItems: 1, + Elem: OsConfigOsPolicyAssignmentRolloutDisruptionBudgetSchema(), + }, + + "min_wait_duration": { + Type: schema.TypeString, + Required: true, + Description: "Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout.
A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied.", + }, + }, + } +} + +func OsConfigOsPolicyAssignmentRolloutDisruptionBudgetSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed": { + Type: schema.TypeInt, + Optional: true, + Description: "Specifies a fixed value.", + }, + + "percent": { + Type: schema.TypeInt, + Optional: true, + Description: "Specifies the relative value defined as a percentage, which will be multiplied by a reference value.", + }, + }, + } +} + +func resourceOsConfigOsPolicyAssignmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &osconfig.OSPolicyAssignment{ + InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), + Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, createDirective...) 
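+ // Apply is the DCL's combined create-or-update entry point; Update below
+ // issues the same call with a different directive. The error handling that
+ // follows treats dcl.DiffAfterApplyError as a soft failure: the apply itself
+ // succeeded, but the DCL still observed a diff afterwards, so the provider
+ // logs the diff and proceeds to Read rather than failing the create.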
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating OSPolicyAssignment: %s", err) + } + + log.Printf("[DEBUG] Finished creating OSPolicyAssignment %q: %#v", d.Id(), res) + + return resourceOsConfigOsPolicyAssignmentRead(d, meta) +} + +func resourceOsConfigOsPolicyAssignmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &osconfig.OSPolicyAssignment{ + InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), + Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetOSPolicyAssignment(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("OsConfigOsPolicyAssignment %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("instance_filter", flattenOsConfigOsPolicyAssignmentInstanceFilter(res.InstanceFilter)); err != nil { + return fmt.Errorf("error setting instance_filter in state: %s", err) + } + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("os_policies", flattenOsConfigOsPolicyAssignmentOSPoliciesArray(res.OSPolicies)); err != nil { + return fmt.Errorf("error setting os_policies in state: %s", err) + } + if err = d.Set("rollout", flattenOsConfigOsPolicyAssignmentRollout(res.Rollout)); err != nil { + return fmt.Errorf("error setting rollout in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("baseline", res.Baseline); err != nil { + return fmt.Errorf("error setting baseline in state: %s", err) + } + if err = d.Set("deleted", res.Deleted); err != nil { + return fmt.Errorf("error setting deleted in state: %s", err) + } + if err = d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("error setting etag in state: %s", err) + } + if err = d.Set("reconciling", res.Reconciling); err != nil { + return fmt.Errorf("error setting reconciling in state: %s", err) + } + if err = d.Set("revision_create_time", res.RevisionCreateTime); err != nil { + return fmt.Errorf("error setting 
revision_create_time in state: %s", err) + } + if err = d.Set("revision_id", res.RevisionId); err != nil { + return fmt.Errorf("error setting revision_id in state: %s", err) + } + if err = d.Set("rollout_state", res.RolloutState); err != nil { + return fmt.Errorf("error setting rollout_state in state: %s", err) + } + if err = d.Set("uid", res.Uid); err != nil { + return fmt.Errorf("error setting uid in state: %s", err) + } + + return nil +} +func resourceOsConfigOsPolicyAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &osconfig.OSPolicyAssignment{ + InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), + Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyOSPolicyAssignment(context.Background(), obj, directive...) 
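+ // Update reuses the same ApplyOSPolicyAssignment call as Create, swapping in
+ // an update directive. The DCL, not the provider, computes the field-level
+ // diff between the desired state in obj and the live resource, which is why
+ // no per-field d.HasChange bookkeeping appears in this resource.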
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating OSPolicyAssignment: %s", err) + } + + log.Printf("[DEBUG] Finished updating OSPolicyAssignment %q: %#v", d.Id(), res) + + return resourceOsConfigOsPolicyAssignmentRead(d, meta) +} + +func resourceOsConfigOsPolicyAssignmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &osconfig.OSPolicyAssignment{ + InstanceFilter: expandOsConfigOsPolicyAssignmentInstanceFilter(d.Get("instance_filter")), + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + OSPolicies: expandOsConfigOsPolicyAssignmentOSPoliciesArray(d.Get("os_policies")), + Rollout: expandOsConfigOsPolicyAssignmentRollout(d.Get("rollout")), + Description: dcl.String(d.Get("description").(string)), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting OSPolicyAssignment %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLOsConfigClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteOSPolicyAssignment(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting OSPolicyAssignment: %s", err) + } + + log.Printf("[DEBUG] Finished deleting OSPolicyAssignment %q", d.Id()) + return nil +} + +func resourceOsConfigOsPolicyAssignmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/osPolicyAssignments/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)", + "(?P<location>[^/]+)/(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandOsConfigOsPolicyAssignmentInstanceFilter(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilter { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentInstanceFilter + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentInstanceFilter + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentInstanceFilter{ + All: dcl.Bool(obj["all"].(bool)), + ExclusionLabels: expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(obj["exclusion_labels"]), + InclusionLabels: expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(obj["inclusion_labels"]), + Inventories: expandOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(obj["inventories"]), + } +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilter(obj *osconfig.OSPolicyAssignmentInstanceFilter)
interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "all": obj.All, + "exclusion_labels": flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(obj.ExclusionLabels), + "inclusion_labels": flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(obj.InclusionLabels), + "inventories": flattenOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(obj.Inventories), + } + + return []interface{}{transformed} + +} +func expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(o interface{}) []osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels { + if o == nil { + return make([]osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels, 0) + } + + items := make([]osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentInstanceFilterExclusionLabels + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels{ + Labels: checkStringMap(obj["labels"]), + } +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabelsArray(objs []osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(&item) + items = append(items, i) + } + + return items +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilterExclusionLabels(obj *osconfig.OSPolicyAssignmentInstanceFilterExclusionLabels) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "labels": obj.Labels, + } + + return transformed + +} +func expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(o interface{}) []osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels { + if o == nil { + return make([]osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels, 0) + } + + items := make([]osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentInstanceFilterInclusionLabels + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels{ + Labels: checkStringMap(obj["labels"]), + } +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabelsArray(objs []osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(&item) + items = 
append(items, i) + } + + return items +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilterInclusionLabels(obj *osconfig.OSPolicyAssignmentInstanceFilterInclusionLabels) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "labels": obj.Labels, + } + + return transformed + +} +func expandOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(o interface{}) []osconfig.OSPolicyAssignmentInstanceFilterInventories { + if o == nil { + return make([]osconfig.OSPolicyAssignmentInstanceFilterInventories, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentInstanceFilterInventories, 0) + } + + items := make([]osconfig.OSPolicyAssignmentInstanceFilterInventories, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentInstanceFilterInventories(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentInstanceFilterInventories(o interface{}) *osconfig.OSPolicyAssignmentInstanceFilterInventories { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentInstanceFilterInventories + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentInstanceFilterInventories{ + OSShortName: dcl.String(obj["os_short_name"].(string)), + OSVersion: dcl.String(obj["os_version"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilterInventoriesArray(objs []osconfig.OSPolicyAssignmentInstanceFilterInventories) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentInstanceFilterInventories(&item) + items = append(items, i) + } + + return items +} + +func flattenOsConfigOsPolicyAssignmentInstanceFilterInventories(obj *osconfig.OSPolicyAssignmentInstanceFilterInventories) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "os_short_name": obj.OSShortName, + "os_version": obj.OSVersion, + } + + return transformed + +} +func expandOsConfigOsPolicyAssignmentOSPoliciesArray(o interface{}) []osconfig.OSPolicyAssignmentOSPolicies { + if o == nil { + return make([]osconfig.OSPolicyAssignmentOSPolicies, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentOSPolicies, 0) + } + + items := make([]osconfig.OSPolicyAssignmentOSPolicies, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentOSPolicies(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentOSPolicies(o interface{}) *osconfig.OSPolicyAssignmentOSPolicies { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPolicies + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPolicies{ + Id: dcl.String(obj["id"].(string)), + Mode: osconfig.OSPolicyAssignmentOSPoliciesModeEnumRef(obj["mode"].(string)), + ResourceGroups: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(obj["resource_groups"]), + AllowNoResourceGroupMatch: dcl.Bool(obj["allow_no_resource_group_match"].(bool)), + Description: dcl.String(obj["description"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesArray(objs []osconfig.OSPolicyAssignmentOSPolicies) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentOSPolicies(&item) + items = append(items, i) + } + + return items +} 
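+ // The expander/flattener pairs in the rest of this file all follow the same
+ // mechanical shape: a schema.TypeList block with MaxItems: 1 is held in
+ // Terraform state as a []interface{} wrapping a single
+ // map[string]interface{} (and repeated blocks as a []interface{} of such
+ // maps), converted to and from the corresponding typed DCL struct. The pair
+ // below is a minimal hand-written sketch of that round trip; exampleWidget
+ // and its helpers are hypothetical, not part of the generated resource, and
+ // are kept here purely for illustration.
+ type exampleWidget struct {
+ Name *string
+ }
+
+ func expandExampleWidget(o interface{}) *exampleWidget {
+ objArr, ok := o.([]interface{})
+ if !ok || len(objArr) == 0 {
+ // The generated helpers return a typed Empty* sentinel here rather than nil.
+ return nil
+ }
+ obj := objArr[0].(map[string]interface{})
+ return &exampleWidget{Name: dcl.String(obj["name"].(string))}
+ }
+
+ func flattenExampleWidget(obj *exampleWidget) interface{} {
+ if obj == nil {
+ return nil
+ }
+ return []interface{}{map[string]interface{}{"name": obj.Name}}
+ }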
+ +func flattenOsConfigOsPolicyAssignmentOSPolicies(obj *osconfig.OSPolicyAssignmentOSPolicies) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "id": obj.Id, + "mode": obj.Mode, + "resource_groups": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(obj.ResourceGroups), + "allow_no_resource_group_match": obj.AllowNoResourceGroupMatch, + "description": obj.Description, + } + + return transformed + +} +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(o interface{}) []osconfig.OSPolicyAssignmentOSPoliciesResourceGroups { + if o == nil { + return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroups, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroups, 0) + } + + items := make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroups, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroups { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroups + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroups{ + Resources: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(obj["resources"]), + InventoryFilters: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(obj["inventory_filters"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsArray(objs []osconfig.OSPolicyAssignmentOSPoliciesResourceGroups) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(&item) + items = append(items, i) + } + + return items +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroups(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroups) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "resources": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(obj.Resources), + "inventory_filters": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(obj.InventoryFilters), + } + + return transformed + +} +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(o interface{}) []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources { + if o == nil { + return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0) + } + + items := make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResources + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources{ + Id: dcl.String(obj["id"].(string)), + Exec: 
expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(obj["exec"]), + File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(obj["file"]), + Pkg: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(obj["pkg"]), + Repository: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(obj["repository"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesArray(objs []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(&item) + items = append(items, i) + } + + return items +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResources(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResources) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "id": obj.Id, + "exec": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(obj.Exec), + "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(obj.File), + "pkg": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(obj.Pkg), + "repository": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(obj.Repository), + } + + return transformed + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{ + Validate: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(obj["validate"]), + Enforce: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(obj["enforce"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "validate": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(obj.Validate), + "enforce": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(obj.Enforce), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{ + Interpreter: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumRef(obj["interpreter"].(string)), + Args: expandStringArray(obj["args"]), + File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(obj["file"]), + 
OutputFilePath: dcl.String(obj["output_file_path"].(string)), + Script: dcl.String(obj["script"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "interpreter": obj.Interpreter, + "args": obj.Args, + "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(obj.File), + "output_file_path": obj.OutputFilePath, + "script": obj.Script, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{ + AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), + Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(obj["gcs"]), + LocalPath: dcl.String(obj["local_path"].(string)), + Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(obj["remote"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_insecure": obj.AllowInsecure, + "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(obj.Gcs), + "local_path": obj.LocalPath, + "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(obj.Remote), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{ + Bucket: dcl.String(obj["bucket"].(string)), + Object: dcl.String(obj["object"].(string)), + Generation: dcl.Int64(int64(obj["generation"].(int))), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bucket": obj.Bucket, + "object": obj.Object, + "generation": obj.Generation, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote { + if o == 
nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{ + Uri: dcl.String(obj["uri"].(string)), + Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "uri": obj.Uri, + "sha256_checksum": obj.Sha256Checksum, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{ + Interpreter: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumRef(obj["interpreter"].(string)), + Args: expandStringArray(obj["args"]), + File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(obj["file"]), + OutputFilePath: dcl.String(obj["output_file_path"].(string)), + Script: dcl.String(obj["script"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "interpreter": obj.Interpreter, + "args": obj.Args, + "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(obj.File), + "output_file_path": obj.OutputFilePath, + "script": obj.Script, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{ + AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), + Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(obj["gcs"]), + LocalPath: dcl.String(obj["local_path"].(string)), + Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(obj["remote"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := 
map[string]interface{}{ + "allow_insecure": obj.AllowInsecure, + "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(obj.Gcs), + "local_path": obj.LocalPath, + "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(obj.Remote), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{ + Bucket: dcl.String(obj["bucket"].(string)), + Object: dcl.String(obj["object"].(string)), + Generation: dcl.Int64(int64(obj["generation"].(int))), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bucket": obj.Bucket, + "object": obj.Object, + "generation": obj.Generation, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{ + Uri: dcl.String(obj["uri"].(string)), + Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "uri": obj.Uri, + "sha256_checksum": obj.Sha256Checksum, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{ + Path: dcl.String(obj["path"].(string)), + State: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumRef(obj["state"].(string)), + Content: dcl.String(obj["content"].(string)), + File: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(obj["file"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(obj 
*osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "path": obj.Path, + "state": obj.State, + "content": obj.Content, + "file": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(obj.File), + "permissions": obj.Permissions, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{ + AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), + Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(obj["gcs"]), + LocalPath: dcl.String(obj["local_path"].(string)), + Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(obj["remote"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_insecure": obj.AllowInsecure, + "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(obj.Gcs), + "local_path": obj.LocalPath, + "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(obj.Remote), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{ + Bucket: dcl.String(obj["bucket"].(string)), + Object: dcl.String(obj["object"].(string)), + Generation: dcl.Int64(int64(obj["generation"].(int))), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bucket": obj.Bucket, + "object": obj.Object, + "generation": obj.Generation, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{ + Uri: 
dcl.String(obj["uri"].(string)), + Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "uri": obj.Uri, + "sha256_checksum": obj.Sha256Checksum, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{ + DesiredState: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumRef(obj["desired_state"].(string)), + Apt: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(obj["apt"]), + Deb: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(obj["deb"]), + Googet: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(obj["googet"]), + Msi: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(obj["msi"]), + Rpm: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(obj["rpm"]), + Yum: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(obj["yum"]), + Zypper: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(obj["zypper"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "desired_state": obj.DesiredState, + "apt": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(obj.Apt), + "deb": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(obj.Deb), + "googet": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(obj.Googet), + "msi": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(obj.Msi), + "rpm": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(obj.Rpm), + "yum": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(obj.Yum), + "zypper": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(obj.Zypper), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{ + Name: dcl.String(obj["name"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) interface{} { + if obj == nil 
|| obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "name": obj.Name, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{ + Source: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(obj["source"]), + PullDeps: dcl.Bool(obj["pull_deps"].(bool)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "source": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(obj.Source), + "pull_deps": obj.PullDeps, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{ + AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), + Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(obj["gcs"]), + LocalPath: dcl.String(obj["local_path"].(string)), + Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(obj["remote"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_insecure": obj.AllowInsecure, + "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(obj.Gcs), + "local_path": obj.LocalPath, + "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(obj.Remote), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{ + Bucket: dcl.String(obj["bucket"].(string)), + Object: dcl.String(obj["object"].(string)), + Generation: dcl.Int64(int64(obj["generation"].(int))), + } +} + +func 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bucket": obj.Bucket, + "object": obj.Object, + "generation": obj.Generation, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{ + Uri: dcl.String(obj["uri"].(string)), + Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "uri": obj.Uri, + "sha256_checksum": obj.Sha256Checksum, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{ + Name: dcl.String(obj["name"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "name": obj.Name, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{ + Source: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(obj["source"]), + Properties: expandStringArray(obj["properties"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "source": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(obj.Source), + "properties": obj.Properties, + } + + return []interface{}{transformed} + +} + +func 
expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{ + AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), + Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(obj["gcs"]), + LocalPath: dcl.String(obj["local_path"].(string)), + Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(obj["remote"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_insecure": obj.AllowInsecure, + "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(obj.Gcs), + "local_path": obj.LocalPath, + "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(obj.Remote), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{ + Bucket: dcl.String(obj["bucket"].(string)), + Object: dcl.String(obj["object"].(string)), + Generation: dcl.Int64(int64(obj["generation"].(int))), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bucket": obj.Bucket, + "object": obj.Object, + "generation": obj.Generation, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{ + Uri: dcl.String(obj["uri"].(string)), + Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) interface{} { + if obj == nil || obj.Empty() { + return nil + } + 
transformed := map[string]interface{}{ + "uri": obj.Uri, + "sha256_checksum": obj.Sha256Checksum, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{ + Source: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(obj["source"]), + PullDeps: dcl.Bool(obj["pull_deps"].(bool)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "source": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(obj.Source), + "pull_deps": obj.PullDeps, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{ + AllowInsecure: dcl.Bool(obj["allow_insecure"].(bool)), + Gcs: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(obj["gcs"]), + LocalPath: dcl.String(obj["local_path"].(string)), + Remote: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(obj["remote"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_insecure": obj.AllowInsecure, + "gcs": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(obj.Gcs), + "local_path": obj.LocalPath, + "remote": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(obj.Remote), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{ + Bucket: dcl.String(obj["bucket"].(string)), + Object: dcl.String(obj["object"].(string)), + Generation: dcl.Int64(int64(obj["generation"].(int))), + } +} + +func 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "bucket": obj.Bucket, + "object": obj.Object, + "generation": obj.Generation, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{ + Uri: dcl.String(obj["uri"].(string)), + Sha256Checksum: dcl.String(obj["sha256_checksum"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "uri": obj.Uri, + "sha256_checksum": obj.Sha256Checksum, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{ + Name: dcl.String(obj["name"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "name": obj.Name, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{ + Name: dcl.String(obj["name"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "name": obj.Name, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository { + if o == nil { + return 
osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{ + Apt: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(obj["apt"]), + Goo: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(obj["goo"]), + Yum: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(obj["yum"]), + Zypper: expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(obj["zypper"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "apt": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(obj.Apt), + "goo": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(obj.Goo), + "yum": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(obj.Yum), + "zypper": flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(obj.Zypper), + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{ + ArchiveType: osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumRef(obj["archive_type"].(string)), + Components: expandStringArray(obj["components"]), + Distribution: dcl.String(obj["distribution"].(string)), + Uri: dcl.String(obj["uri"].(string)), + GpgKey: dcl.String(obj["gpg_key"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "archive_type": obj.ArchiveType, + "components": obj.Components, + "distribution": obj.Distribution, + "uri": obj.Uri, + "gpg_key": obj.GpgKey, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{ + Name: dcl.String(obj["name"].(string)), + Url: dcl.String(obj["url"].(string)), + } +} + +func 
flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "name": obj.Name, + "url": obj.Url, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{ + BaseUrl: dcl.String(obj["base_url"].(string)), + Id: dcl.String(obj["id"].(string)), + DisplayName: dcl.String(obj["display_name"].(string)), + GpgKeys: expandStringArray(obj["gpg_keys"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "base_url": obj.BaseUrl, + "id": obj.Id, + "display_name": obj.DisplayName, + "gpg_keys": obj.GpgKeys, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(o interface{}) *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{ + BaseUrl: dcl.String(obj["base_url"].(string)), + Id: dcl.String(obj["id"].(string)), + DisplayName: dcl.String(obj["display_name"].(string)), + GpgKeys: expandStringArray(obj["gpg_keys"]), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "base_url": obj.BaseUrl, + "id": obj.Id, + "display_name": obj.DisplayName, + "gpg_keys": obj.GpgKeys, + } + + return []interface{}{transformed} + +} +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(o interface{}) []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { + if o == nil { + return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0) + } + + items := make([]osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, 0, len(objs)) + for _, item := range objs { + i := expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(item) + items = append(items, *i) + } + + return items +} + +func expandOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(o interface{}) 
*osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters + } + + obj := o.(map[string]interface{}) + return &osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{ + OSShortName: dcl.String(obj["os_short_name"].(string)), + OSVersion: dcl.String(obj["os_version"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersArray(objs []osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(&item) + items = append(items, i) + } + + return items +} + +func flattenOsConfigOsPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(obj *osconfig.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "os_short_name": obj.OSShortName, + "os_version": obj.OSVersion, + } + + return transformed + +} + +func expandOsConfigOsPolicyAssignmentRollout(o interface{}) *osconfig.OSPolicyAssignmentRollout { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentRollout + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentRollout + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentRollout{ + DisruptionBudget: expandOsConfigOsPolicyAssignmentRolloutDisruptionBudget(obj["disruption_budget"]), + MinWaitDuration: dcl.String(obj["min_wait_duration"].(string)), + } +} + +func flattenOsConfigOsPolicyAssignmentRollout(obj *osconfig.OSPolicyAssignmentRollout) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "disruption_budget": flattenOsConfigOsPolicyAssignmentRolloutDisruptionBudget(obj.DisruptionBudget), + "min_wait_duration": obj.MinWaitDuration, + } + + return []interface{}{transformed} + +} + +func expandOsConfigOsPolicyAssignmentRolloutDisruptionBudget(o interface{}) *osconfig.OSPolicyAssignmentRolloutDisruptionBudget { + if o == nil { + return osconfig.EmptyOSPolicyAssignmentRolloutDisruptionBudget + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return osconfig.EmptyOSPolicyAssignmentRolloutDisruptionBudget + } + obj := objArr[0].(map[string]interface{}) + return &osconfig.OSPolicyAssignmentRolloutDisruptionBudget{ + Fixed: dcl.Int64(int64(obj["fixed"].(int))), + Percent: dcl.Int64(int64(obj["percent"].(int))), + } +} + +func flattenOsConfigOsPolicyAssignmentRolloutDisruptionBudget(obj *osconfig.OSPolicyAssignmentRolloutDisruptionBudget) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "fixed": obj.Fixed, + "percent": obj.Percent, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_os_config_os_policy_assignment_generated_test.go b/google/resource_os_config_os_policy_assignment_generated_test.go new file mode 100644 index 00000000000..137ece37c54 --- /dev/null +++ b/google/resource_os_config_os_policy_assignment_generated_test.go @@ -0,0 +1,1522 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules 
(https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + osconfig "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignment(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "zone": getTestZoneFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckOsConfigOsPolicyAssignmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignment(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + { + Config: testAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignmentUpdate0(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + { + Config: testAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignmentUpdate1(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + }, + }) +} +func TestAccOsConfigOsPolicyAssignment_PercentOsPolicyAssignment(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "zone": getTestZoneFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckOsConfigOsPolicyAssignmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOsConfigOsPolicyAssignment_PercentOsPolicyAssignment(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + { + Config: testAccOsConfigOsPolicyAssignment_PercentOsPolicyAssignmentUpdate0(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + }, + }) +} + +func testAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignment(context map[string]interface{}) string { + return Nprintf(` +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = false + + exclusion_labels { + labels = { + label-two = 
"value-two" + } + } + + inclusion_labels { + labels = { + label-one = "value-one" + } + } + + inventories { + os_short_name = "centos" + os_version = "8.*" + } + } + + location = "%{zone}" + name = "tf-test-assignment%{random_suffix}" + + os_policies { + id = "policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt" + + pkg { + desired_state = "INSTALLED" + + apt { + name = "bazel" + } + } + } + + resources { + id = "deb1" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + local_path = "$HOME/package.deb" + } + } + } + } + + resources { + id = "deb2" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + allow_insecure = true + + remote { + uri = "ftp.us.debian.org/debian/package.deb" + sha256_checksum = "3bbfd1043cd7afdb78cf9afec36c0c5370d2fea98166537b4e67f3816f256025" + } + } + + pull_deps = true + } + } + } + + resources { + id = "deb3" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + pull_deps = true + } + } + } + + resources { + id = "yum" + + pkg { + desired_state = "INSTALLED" + + yum { + name = "gstreamer-plugins-base-devel.x86_64" + } + } + } + + resources { + id = "zypper" + + pkg { + desired_state = "INSTALLED" + + zypper { + name = "gcc" + } + } + } + + resources { + id = "rpm1" + + pkg { + desired_state = "INSTALLED" + + rpm { + source { + local_path = "$HOME/package.rpm" + } + + pull_deps = true + } + } + } + + resources { + id = "rpm2" + + pkg { + desired_state = "INSTALLED" + + rpm { + source { + allow_insecure = true + + remote { + uri = "https://mirror.jaleco.com/centos/8.3.2011/BaseOS/x86_64/os/Packages/efi-filesystem-3-2.el8.noarch.rpm" + sha256_checksum = "3bbfd1043cd7afdb78cf9afec36c0c5370d2fea98166537b4e67f3816f256025" + } + } + } + } + } + + resources { + id = "rpm3" + + pkg { + desired_state = "INSTALLED" + + rpm { + source { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + } + + inventory_filters { + os_short_name = "centos" + os_version = "8.*" + } + } + + resource_groups { + resources { + id = "apt-to-deb" + + pkg { + desired_state = "INSTALLED" + + apt { + name = "bazel" + } + } + } + + resources { + id = "deb-local-path-to-gcs" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + local_path = "$HOME/package.deb" + } + } + } + } + + resources { + id = "googet" + + pkg { + desired_state = "INSTALLED" + + googet { + name = "gcc" + } + } + } + + resources { + id = "msi1" + + pkg { + desired_state = "INSTALLED" + + msi { + source { + local_path = "$HOME/package.msi" + } + + properties = ["REBOOT=ReallySuppress"] + } + } + } + + resources { + id = "msi2" + + pkg { + desired_state = "INSTALLED" + + msi { + source { + allow_insecure = true + + remote { + uri = "https://remote.uri.com/package.msi" + sha256_checksum = "3bbfd1043cd7afdb78cf9afec36c0c5370d2fea98166537b4e67f3816f256025" + } + } + } + } + } + + resources { + id = "msi3" + + pkg { + desired_state = "INSTALLED" + + msi { + source { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + } + } + + allow_no_resource_group_match = false + description = "A test os policy" + } + + rollout { + disruption_budget { + fixed = 1 + } + + min_wait_duration = "3.5s" + } + + description = "A test os policy assignment" + project = "%{project_name}" +} + + +`, context) +} + +func testAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignmentUpdate0(context map[string]interface{}) string { + return 
Nprintf(` +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = false + + inventories { + os_short_name = "" + os_version = "9.*" + } + } + + location = "%{zone}" + name = "tf-test-assignment%{random_suffix}" + + os_policies { + id = "policy" + mode = "ENFORCEMENT" + + resource_groups { + resources { + id = "apt" + + pkg { + desired_state = "INSTALLED" + + apt { + name = "firefox" + } + } + } + + resources { + id = "new-deb1" + + pkg { + desired_state = "REMOVED" + + deb { + source { + local_path = "$HOME/new-package.deb" + } + } + } + } + + resources { + id = "new-deb2" + + pkg { + desired_state = "REMOVED" + + deb { + source { + allow_insecure = false + + remote { + uri = "ftp.us.debian.org/debian/new-package.deb" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" + } + } + + pull_deps = false + } + } + } + + resources { + id = "new-yum" + + pkg { + desired_state = "REMOVED" + + yum { + name = "vlc.x86_64" + } + } + } + + resources { + id = "new-zypper" + + pkg { + desired_state = "REMOVED" + + zypper { + name = "ModemManager" + } + } + } + + resources { + id = "new-rpm1" + + pkg { + desired_state = "REMOVED" + + rpm { + source { + local_path = "$HOME/new-package.rpm" + } + + pull_deps = false + } + } + } + + resources { + id = "new-rpm2" + + pkg { + desired_state = "REMOVED" + + rpm { + source { + allow_insecure = false + + remote { + uri = "https://mirror.jaleco.com/centos/8.3.2011/BaseOS/x86_64/os/Packages/NetworkManager-adsl-1.26.0-12.el8_3.x86_64.rpm" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" + } + } + } + } + } + + resources { + id = "new-rpm3" + + pkg { + desired_state = "REMOVED" + + rpm { + source { + gcs { + bucket = "new-test-bucket" + object = "new-test-object" + generation = 2 + } + } + } + } + } + + inventory_filters { + os_short_name = "" + os_version = "9.*" + } + } + + resource_groups { + resources { + id = "apt-to-deb" + + pkg { + desired_state = "REMOVED" + + deb { + source { + local_path = "$HOME/new-package.deb" + } + } + } + } + + resources { + id = "deb-local-path-to-gcs" + + pkg { + desired_state = "REMOVED" + + deb { + source { + gcs { + bucket = "new-test-bucket" + object = "new-test-object" + generation = 2 + } + } + } + } + } + + resources { + id = "new-googet" + + pkg { + desired_state = "REMOVED" + + googet { + name = "julia" + } + } + } + + resources { + id = "new-msi1" + + pkg { + desired_state = "REMOVED" + + msi { + source { + local_path = "$HOME/new-package.msi" + } + + properties = ["ACTION=INSTALL"] + } + } + } + + resources { + id = "new-msi2" + + pkg { + desired_state = "REMOVED" + + msi { + source { + allow_insecure = false + + remote { + uri = "https://remote.uri.com/new-package.msi" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" + } + } + } + } + } + + resources { + id = "new-msi3" + + pkg { + desired_state = "REMOVED" + + msi { + source { + gcs { + bucket = "new-test-bucket" + object = "new-test-object" + generation = 2 + } + } + } + } + } + } + + allow_no_resource_group_match = true + description = "An updated test os policy" + } + + rollout { + disruption_budget { + fixed = 2 + } + + min_wait_duration = "7.5s" + } + + description = "An updated test os policy assignment" + project = "%{project_name}" +} + + +`, context) +} + +func testAccOsConfigOsPolicyAssignment_FixedOsPolicyAssignmentUpdate1(context map[string]interface{}) string { + return Nprintf(` +resource 
"google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = true + } + + location = "%{zone}" + name = "tf-test-assignment%{random_suffix}" + + os_policies { + id = "policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt-to-yum" + + repository { + apt { + archive_type = "DEB" + components = ["doc"] + distribution = "debian" + uri = "https://atl.mirrors.clouvider.net/debian" + gpg_key = ".gnupg/pubring.kbx" + } + } + } + + resources { + id = "yum" + + repository { + yum { + base_url = "http://centos.s.uw.edu/centos/" + id = "yum" + display_name = "yum" + gpg_keys = ["RPM-GPG-KEY-CentOS-7"] + } + } + } + + resources { + id = "zypper" + + repository { + zypper { + base_url = "http://mirror.dal10.us.leaseweb.net/opensuse" + id = "zypper" + display_name = "zypper" + gpg_keys = ["sample-key-uri"] + } + } + } + + resources { + id = "goo" + + repository { + goo { + name = "goo" + url = "https://foo.com/googet/bar" + } + } + } + + resources { + id = "exec1" + + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + + file { + local_path = "$HOME/script.sh" + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + args = ["arg1"] + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "exec2" + + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + args = ["arg1"] + + file { + local_path = "$HOME/script.sh" + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "exec3" + + exec { + validate { + interpreter = "SHELL" + + file { + allow_insecure = true + + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + output_file_path = "$HOME/out" + script = "pwd" + } + } + } + + resources { + id = "exec4" + + exec { + validate { + interpreter = "SHELL" + output_file_path = "$HOME/out" + script = "pwd" + } + + enforce { + interpreter = "SHELL" + + file { + allow_insecure = true + + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "file1" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + local_path = "$HOME/file" + } + } + } + } + + resource_groups { + resources { + id = "file2" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/file" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + } + } + + resources { + id = "file3" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + + resources { + id = "file4" + + file { + path = "$HOME/file" + state = "PRESENT" + content = "sample-content" + } + } + } + } + + rollout { + disruption_budget { + percent = 1 + } + + min_wait_duration = "3.5s" + } + + description = "A test os policy assignment" + project = "%{project_name}" +} + + +`, context) +} + +func 
testAccOsConfigOsPolicyAssignment_PercentOsPolicyAssignment(context map[string]interface{}) string { + return Nprintf(` +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = true + } + + location = "%{zone}" + name = "tf-test-assignment%{random_suffix}" + + os_policies { + id = "policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt-to-yum" + + repository { + apt { + archive_type = "DEB" + components = ["doc"] + distribution = "debian" + uri = "https://atl.mirrors.clouvider.net/debian" + gpg_key = ".gnupg/pubring.kbx" + } + } + } + + resources { + id = "yum" + + repository { + yum { + base_url = "http://centos.s.uw.edu/centos/" + id = "yum" + display_name = "yum" + gpg_keys = ["RPM-GPG-KEY-CentOS-7"] + } + } + } + + resources { + id = "zypper" + + repository { + zypper { + base_url = "http://mirror.dal10.us.leaseweb.net/opensuse" + id = "zypper" + display_name = "zypper" + gpg_keys = ["sample-key-uri"] + } + } + } + + resources { + id = "goo" + + repository { + goo { + name = "goo" + url = "https://foo.com/googet/bar" + } + } + } + + resources { + id = "exec1" + + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + + file { + local_path = "$HOME/script.sh" + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + args = ["arg1"] + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "exec2" + + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + args = ["arg1"] + + file { + local_path = "$HOME/script.sh" + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "exec3" + + exec { + validate { + interpreter = "SHELL" + + file { + allow_insecure = true + + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + output_file_path = "$HOME/out" + script = "pwd" + } + } + } + + resources { + id = "exec4" + + exec { + validate { + interpreter = "SHELL" + output_file_path = "$HOME/out" + script = "pwd" + } + + enforce { + interpreter = "SHELL" + + file { + allow_insecure = true + + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "file1" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + local_path = "$HOME/file" + } + } + } + } + + resource_groups { + resources { + id = "file2" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/file" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + } + } + + resources { + id = "file3" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + + resources { + id = "file4" + + file { + path = "$HOME/file" + state = "PRESENT" + content = "sample-content" + } + } + } + } + + rollout { + disruption_budget { + percent = 1 + } + + min_wait_duration = 
"3.5s" + } + + description = "A test os policy assignment" + project = "%{project_name}" +} + + +`, context) +} + +func testAccOsConfigOsPolicyAssignment_PercentOsPolicyAssignmentUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = false + + exclusion_labels { + labels = { + label-two = "value-two" + } + } + + inclusion_labels { + labels = { + label-one = "value-one" + } + } + } + + location = "%{zone}" + name = "tf-test-assignment%{random_suffix}" + + os_policies { + id = "new-policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt-to-yum" + + repository { + yum { + base_url = "http://mirrors.rcs.alaska.edu/centos/" + id = "new-yum" + display_name = "new-yum" + gpg_keys = ["RPM-GPG-KEY-CentOS-Debug-7"] + } + } + } + + resources { + id = "new-yum" + + repository { + yum { + base_url = "http://mirrors.rcs.alaska.edu/centos/" + id = "new-yum" + display_name = "new-yum" + gpg_keys = ["RPM-GPG-KEY-CentOS-Debug-7"] + } + } + } + + resources { + id = "new-zypper" + + repository { + zypper { + base_url = "http://mirror.vtti.vt.edu/opensuse/" + id = "new-zypper" + display_name = "new-zypper" + gpg_keys = ["new-sample-key-uri"] + } + } + } + + resources { + id = "new-goo" + + repository { + goo { + name = "new-goo" + url = "https://foo.com/googet/baz" + } + } + } + + resources { + id = "new-exec1" + + exec { + validate { + interpreter = "POWERSHELL" + args = ["arg2"] + + file { + local_path = "$HOME/script.bat" + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "POWERSHELL" + args = ["arg2"] + + file { + allow_insecure = false + + remote { + uri = "https://www.example.com/script.bat" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "new-exec2" + + exec { + validate { + interpreter = "POWERSHELL" + args = ["arg2"] + + file { + allow_insecure = false + + remote { + uri = "https://www.example.com/script.bat" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "POWERSHELL" + args = ["arg2"] + + file { + local_path = "$HOME/script.bat" + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "new-exec3" + + exec { + validate { + interpreter = "POWERSHELL" + + file { + allow_insecure = false + + gcs { + bucket = "new-test-bucket" + object = "new-test-object" + generation = 2 + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "POWERSHELL" + output_file_path = "$HOME/out" + script = "dir" + } + } + } + + resources { + id = "new-exec4" + + exec { + validate { + interpreter = "POWERSHELL" + output_file_path = "$HOME/out" + script = "dir" + } + + enforce { + interpreter = "POWERSHELL" + + file { + allow_insecure = false + + gcs { + bucket = "new-test-bucket" + object = "new-test-object" + generation = 2 + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "new-file1" + + file { + path = "$HOME/new-file" + state = "PRESENT" + + file { + local_path = "$HOME/new-file" + } + } + } + } + + resource_groups { + resources { + id = "new-file2" + + file { + path = "$HOME/new-file" + state = "CONTENTS_MATCH" + + file { + allow_insecure = false + + remote { + uri = "https://www.example.com/new-file" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" 
+ } + } + } + } + + resources { + id = "new-file3" + + file { + path = "$HOME/new-file" + state = "CONTENTS_MATCH" + + file { + gcs { + bucket = "new-test-bucket" + object = "new-test-object" + generation = 2 + } + } + } + } + + resources { + id = "new-file4" + + file { + path = "$HOME/new-file" + state = "CONTENTS_MATCH" + content = "new-sample-content" + } + } + } + } + + rollout { + disruption_budget { + percent = 2 + } + + min_wait_duration = "3.5s" + } + + description = "A test os policy assignment" + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckOsConfigOsPolicyAssignmentDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_os_config_os_policy_assignment" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &osconfig.OSPolicyAssignment{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + Baseline: dcl.Bool(rs.Primary.Attributes["baseline"] == "true"), + Deleted: dcl.Bool(rs.Primary.Attributes["deleted"] == "true"), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Reconciling: dcl.Bool(rs.Primary.Attributes["reconciling"] == "true"), + RevisionCreateTime: dcl.StringOrNil(rs.Primary.Attributes["revision_create_time"]), + RevisionId: dcl.StringOrNil(rs.Primary.Attributes["revision_id"]), + RolloutState: osconfig.OSPolicyAssignmentRolloutStateEnumRef(rs.Primary.Attributes["rollout_state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + } + + client := NewDCLOsConfigClient(config, config.userAgent, billingProject, 0) + _, err := client.GetOSPolicyAssignment(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_os_config_os_policy_assignment still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_os_config_os_policy_assignment_sweeper_test.go b/google/resource_os_config_os_policy_assignment_sweeper_test.go new file mode 100644 index 00000000000..6a78a9ec3a1 --- /dev/null +++ b/google/resource_os_config_os_policy_assignment_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file.
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + osconfig "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("OsConfigOsPolicyAssignment", &resource.Sweeper{ + Name: "OsConfigOsPolicyAssignment", + F: testSweepOsConfigOsPolicyAssignment, + }) +} + +func testSweepOsConfigOsPolicyAssignment(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for OsConfigOsPolicyAssignment") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLOsConfigClient(config, config.userAgent, "", 0) + err = client.DeleteAllOSPolicyAssignment(context.Background(), d["project"], d["location"], isDeletableOsConfigOsPolicyAssignment) + if err != nil { + return err + } + return nil +} + +func isDeletableOsConfigOsPolicyAssignment(r *osconfig.OSPolicyAssignment) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_privateca_certificate_template.go b/google/resource_privateca_certificate_template.go new file mode 100644 index 00000000000..e57c33ec8f8 --- /dev/null +++ b/google/resource_privateca_certificate_template.go @@ -0,0 +1,1238 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
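The sweeper registered in resource_os_config_os_policy_assignment_sweeper_test.go only fires when the test binary is launched in sweep mode; `resource.TestMain` is the hook that connects the SDK's `-sweep`/`-sweep-run` flags to the `AddTestSweepers` registry. A minimal sketch of that entry point follows (the provider already ships an equivalent; this is orientation, not new wiring):

```go
package google

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// TestMain hands control to the SDK test harness, which either executes
// registered sweepers (go test -sweep=us-central1 -sweep-run=OsConfigOsPolicyAssignment)
// or falls through to the normal test run when no sweep flag is set.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}
```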
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" +) + +func resourcePrivatecaCertificateTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourcePrivatecaCertificateTemplateCreate, + Read: resourcePrivatecaCertificateTemplateRead, + Update: resourcePrivatecaCertificateTemplateUpdate, + Delete: resourcePrivatecaCertificateTemplateDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePrivatecaCertificateTemplateImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The resource name for this CertificateTemplate in the format `projects/*/locations/*/certificateTemplates/*`.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. A human-readable description of scenarios this template is intended for.", + }, + + "identity_constraints": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Describes constraints on identities that may appear in Certificates issued using this template. If this is omitted, then this template will not add restrictions on a certificate's identity.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplateIdentityConstraintsSchema(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "Optional. Labels with user-defined metadata.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "passthrough_extensions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Describes the set of X.509 extensions that may appear in a Certificate issued using this CertificateTemplate. If a certificate request sets extensions that don't appear in the passthrough_extensions, those extensions will be dropped. If the issuing CaPool's IssuancePolicy defines baseline_values that don't appear here, the certificate issuance request will fail. If this is omitted, then this template will not add restrictions on a certificate's X.509 extensions. These constraints do not apply to X.509 extensions set in this CertificateTemplate's predefined_values.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePassthroughExtensionsSchema(), + }, + + "predefined_values": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A set of X.509 values that will be applied to all issued certificates that use this template. If the certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. 
If the issuing CaPool's IssuancePolicy defines conflicting baseline_values for the same properties, the certificate issuance request will fail.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePredefinedValuesSchema(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this CertificateTemplate was created.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time at which this CertificateTemplate was updated.", + }, + }, + } +} + +func PrivatecaCertificateTemplateIdentityConstraintsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_subject_alt_names_passthrough": { + Type: schema.TypeBool, + Required: true, + Description: "Required. If this is true, the SubjectAltNames extension may be copied from a certificate request into the signed certificate. Otherwise, the requested SubjectAltNames will be discarded.", + }, + + "allow_subject_passthrough": { + Type: schema.TypeBool, + Required: true, + Description: "Required. If this is true, the Subject field may be copied from a certificate request into the signed certificate. Otherwise, the requested Subject will be discarded.", + }, + + "cel_expression": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A CEL expression that may be used to validate the resolved X.509 Subject and/or Subject Alternative Name before a certificate is signed. To see the full allowed syntax and some examples, see https://cloud.google.com/certificate-authority-service/docs/using-cel", + MaxItems: 1, + Elem: PrivatecaCertificateTemplateIdentityConstraintsCelExpressionSchema(), + }, + }, + } +} + +func PrivatecaCertificateTemplateIdentityConstraintsCelExpressionSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + }, + + "expression": { + Type: schema.TypeString, + Optional: true, + Description: "Textual representation of an expression in Common Expression Language syntax.", + }, + + "location": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + }, + + "title": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + }, + }, + } +} + +func PrivatecaCertificateTemplatePassthroughExtensionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "additional_extensions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A set of ObjectIds identifying custom X.509 extensions. Will be combined with known_extensions to determine the full set of X.509 extensions.", + Elem: PrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsSchema(), + }, + + "known_extensions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. A set of named X.509 extensions. 
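The `cel_expression` block defined just above is easier to parse with a concrete value in mind. Below is a sketch of the identity-constraints message the matching expanders would produce from a filled-in block; the `privateca.CertificateTemplateIdentityConstraints` struct and field names follow the DCL's usual naming conventions but are assumptions here, as is the example domain:

```go
package google

import (
	dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca"
)

// exampleIdentityConstraints (hypothetical) copies the requested Subject and
// SANs into issued certificates, but only when every SAN is a DNS name under
// .example.com; requests whose resolved identity fails the CEL check are rejected.
func exampleIdentityConstraints() *privateca.CertificateTemplateIdentityConstraints {
	return &privateca.CertificateTemplateIdentityConstraints{
		AllowSubjectPassthrough:         dcl.Bool(true),
		AllowSubjectAltNamesPassthrough: dcl.Bool(true),
		CelExpression: &privateca.CertificateTemplateIdentityConstraintsCelExpression{
			Title:      dcl.String("dns-only"),
			Expression: dcl.String(`subject_alt_names.all(san, san.type == DNS && san.value.endsWith(".example.com"))`),
		},
	}
}
```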
Will be combined with additional_extensions to determine the full set of X.509 extensions.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func PrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_id_path": { + Type: schema.TypeList, + Required: true, + Description: "Required. The parts of an OID path. The most significant parts of the path come first.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "additional_extensions": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Describes custom X.509 extensions.", + Elem: PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsSchema(), + }, + + "aia_ocsp_servers": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the \"Authority Information Access\" extension in the certificate.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ca_options": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Describes options in this X509Parameters that are relevant in a CA certificate.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePredefinedValuesCaOptionsSchema(), + }, + + "key_usage": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Indicates the intended use for keys that correspond to a certificate.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageSchema(), + }, + + "policy_ids": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.", + Elem: PrivatecaCertificateTemplatePredefinedValuesPolicyIdsSchema(), + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_id": { + Type: schema.TypeList, + Required: true, + Description: "Required. The OID for this X.509 extension.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectIdSchema(), + }, + + "value": { + Type: schema.TypeString, + Required: true, + Description: "Required. The value of this X.509 extension.", + }, + + "critical": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error).", + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectIdSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_id_path": { + Type: schema.TypeList, + Required: true, + Description: "Required. The parts of an OID path. The most significant parts of the path come first.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesCaOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_ca": { + Type: schema.TypeBool, + Optional: true, + Description: "Optional. Refers to the \"CA\" X.509 extension, which is a boolean value. 
When this value is missing, the extension will be omitted from the CA certificate.", + }, + + "max_issuer_path_length": { + Type: schema.TypeInt, + Optional: true, + Description: "Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. If this value is missing, the max path length will be omitted from the CA certificate.", + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesKeyUsageSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base_key_usage": { + Type: schema.TypeList, + Optional: true, + Description: "Describes high-level ways in which a key may be used.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsageSchema(), + }, + + "extended_key_usage": { + Type: schema.TypeList, + Optional: true, + Description: "Detailed scenarios in which a key may be used.", + MaxItems: 1, + Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsageSchema(), + }, + + "unknown_extended_key_usages": { + Type: schema.TypeList, + Optional: true, + Description: "Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message.", + Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesSchema(), + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsageSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert_sign": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used to sign certificates.", + }, + + "content_commitment": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used for cryptographic commitments. Note that this may also be referred to as \"non-repudiation\".", + }, + + "crl_sign": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used to sign certificate revocation lists.", + }, + + "data_encipherment": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used to encipher data.", + }, + + "decipher_only": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used to decipher only.", + }, + + "digital_signature": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used for digital signatures.", + }, + + "encipher_only": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used to encipher only.", + }, + + "key_agreement": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used in a key agreement protocol.", + }, + + "key_encipherment": { + Type: schema.TypeBool, + Optional: true, + Description: "The key may be used to encipher other keys.", + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsageSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_auth": { + Type: schema.TypeBool, + Optional: true, + Description: "Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as \"TLS WWW client authentication\", though regularly used for non-WWW TLS.", + }, + + "code_signing": { + Type: schema.TypeBool, + Optional: true, + Description: "Corresponds to OID 1.3.6.1.5.5.7.3.3.
Officially described as \"Signing of downloadable executable code client authentication\".", + }, + + "email_protection": { + Type: schema.TypeBool, + Optional: true, + Description: "Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as \"Email protection\".", + }, + + "ocsp_signing": { + Type: schema.TypeBool, + Optional: true, + Description: "Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as \"Signing OCSP responses\".", + }, + + "server_auth": { + Type: schema.TypeBool, + Optional: true, + Description: "Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as \"TLS WWW server authentication\", though regularly used for non-WWW TLS.", + }, + + "time_stamping": { + Type: schema.TypeBool, + Optional: true, + Description: "Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as \"Binding the hash of an object to a time\".", + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_id_path": { + Type: schema.TypeList, + Required: true, + Description: "Required. The parts of an OID path. The most significant parts of the path come first.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func PrivatecaCertificateTemplatePredefinedValuesPolicyIdsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_id_path": { + Type: schema.TypeList, + Required: true, + Description: "Required. The parts of an OID path. The most significant parts of the path come first.", + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + }, + } +} + +func resourcePrivatecaCertificateTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &privateca.CertificateTemplate{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), + Labels: checkStringMap(d.Get("labels")), + PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), + PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), + Project: dcl.String(project), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCertificateTemplate(context.Background(), obj, createDirective...) 
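+ // A dcl.DiffAfterApplyError from Apply means the request itself succeeded but
+ // the DCL still sees a diff between the declared intent and the returned state;
+ // the handler below logs it at DEBUG and continues rather than failing the create.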
+ + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating CertificateTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished creating CertificateTemplate %q: %#v", d.Id(), res) + + return resourcePrivatecaCertificateTemplateRead(d, meta) +} + +func resourcePrivatecaCertificateTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &privateca.CertificateTemplate{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), + Labels: checkStringMap(d.Get("labels")), + PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), + PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), + Project: dcl.String(project), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetCertificateTemplate(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("PrivatecaCertificateTemplate %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("location", res.Location); err != nil { + return fmt.Errorf("error setting location in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + if err = d.Set("description", res.Description); err != nil { + return fmt.Errorf("error setting description in state: %s", err) + } + if err = d.Set("identity_constraints", flattenPrivatecaCertificateTemplateIdentityConstraints(res.IdentityConstraints)); err != nil { + return fmt.Errorf("error setting identity_constraints in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("passthrough_extensions", flattenPrivatecaCertificateTemplatePassthroughExtensions(res.PassthroughExtensions)); err != nil { + return fmt.Errorf("error setting passthrough_extensions in state: %s", err) + } + if err = d.Set("predefined_values", flattenPrivatecaCertificateTemplatePredefinedValues(res.PredefinedValues)); err != nil { + return fmt.Errorf("error setting predefined_values in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("update_time", res.UpdateTime); err != nil { + return fmt.Errorf("error setting update_time in state: %s", 
err) + } + + return nil +} +func resourcePrivatecaCertificateTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &privateca.CertificateTemplate{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), + Labels: checkStringMap(d.Get("labels")), + PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), + PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), + Project: dcl.String(project), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyCertificateTemplate(context.Background(), obj, directive...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually update + d.SetId("") + return fmt.Errorf("Error updating CertificateTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished updating CertificateTemplate %q: %#v", d.Id(), res) + + return resourcePrivatecaCertificateTemplateRead(d, meta) +} + +func resourcePrivatecaCertificateTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &privateca.CertificateTemplate{ + Location: dcl.String(d.Get("location").(string)), + Name: dcl.String(d.Get("name").(string)), + Description: dcl.String(d.Get("description").(string)), + IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), + Labels: checkStringMap(d.Get("labels")), + PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), + PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), + Project: dcl.String(project), + } + + log.Printf("[DEBUG] Deleting CertificateTemplate %q", d.Id()) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + if err := client.DeleteCertificateTemplate(context.Background(), obj); err != nil { + return fmt.Errorf("Error deleting
CertificateTemplate: %s", err) + } + + log.Printf("[DEBUG] Finished deleting CertificateTemplate %q", d.Id()) + return nil +} + +func resourcePrivatecaCertificateTemplateImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandPrivatecaCertificateTemplateIdentityConstraints(o interface{}) *privateca.CertificateTemplateIdentityConstraints { + if o == nil { + return privateca.EmptyCertificateTemplateIdentityConstraints + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplateIdentityConstraints + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplateIdentityConstraints{ + AllowSubjectAltNamesPassthrough: dcl.Bool(obj["allow_subject_alt_names_passthrough"].(bool)), + AllowSubjectPassthrough: dcl.Bool(obj["allow_subject_passthrough"].(bool)), + CelExpression: expandPrivatecaCertificateTemplateIdentityConstraintsCelExpression(obj["cel_expression"]), + } +} + +func flattenPrivatecaCertificateTemplateIdentityConstraints(obj *privateca.CertificateTemplateIdentityConstraints) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_subject_alt_names_passthrough": obj.AllowSubjectAltNamesPassthrough, + "allow_subject_passthrough": obj.AllowSubjectPassthrough, + "cel_expression": flattenPrivatecaCertificateTemplateIdentityConstraintsCelExpression(obj.CelExpression), + } + + return []interface{}{transformed} + +} + +func expandPrivatecaCertificateTemplateIdentityConstraintsCelExpression(o interface{}) *privateca.CertificateTemplateIdentityConstraintsCelExpression { + if o == nil { + return privateca.EmptyCertificateTemplateIdentityConstraintsCelExpression + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplateIdentityConstraintsCelExpression + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplateIdentityConstraintsCelExpression{ + Description: dcl.String(obj["description"].(string)), + Expression: dcl.String(obj["expression"].(string)), + Location: dcl.String(obj["location"].(string)), + Title: dcl.String(obj["title"].(string)), + } +} + +func flattenPrivatecaCertificateTemplateIdentityConstraintsCelExpression(obj *privateca.CertificateTemplateIdentityConstraintsCelExpression) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "description": obj.Description, + "expression": obj.Expression, + "location": obj.Location, + "title": obj.Title, + } + + return []interface{}{transformed} + +} + +func expandPrivatecaCertificateTemplatePassthroughExtensions(o interface{}) *privateca.CertificateTemplatePassthroughExtensions { + if o == nil { + return privateca.EmptyCertificateTemplatePassthroughExtensions + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePassthroughExtensions + } + obj := 
objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplatePassthroughExtensions{ + AdditionalExtensions: expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(obj["additional_extensions"]), + KnownExtensions: expandPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(obj["known_extensions"]), + } +} + +func flattenPrivatecaCertificateTemplatePassthroughExtensions(obj *privateca.CertificateTemplatePassthroughExtensions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "additional_extensions": flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(obj.AdditionalExtensions), + "known_extensions": flattenPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(obj.KnownExtensions), + } + + return []interface{}{transformed} + +} +func expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(o interface{}) []privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions { + if o == nil { + return make([]privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions, 0) + } + + items := make([]privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions, 0, len(objs)) + for _, item := range objs { + i := expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(item) + items = append(items, *i) + } + + return items +} + +func expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(o interface{}) *privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions { + if o == nil { + return privateca.EmptyCertificateTemplatePassthroughExtensionsAdditionalExtensions + } + + obj := o.(map[string]interface{}) + return &privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions{ + ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + } +} + +func flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(objs []privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(&item) + items = append(items, i) + } + + return items +} + +func flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(obj *privateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "object_id_path": obj.ObjectIdPath, + } + + return transformed + +} + +func expandPrivatecaCertificateTemplatePredefinedValues(o interface{}) *privateca.CertificateTemplatePredefinedValues { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValues + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePredefinedValues + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValues{ + AdditionalExtensions: expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(obj["additional_extensions"]), + AiaOcspServers: expandStringArray(obj["aia_ocsp_servers"]), + CaOptions: expandPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj["ca_options"]), + KeyUsage: 
expandPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj["key_usage"]), + PolicyIds: expandPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(obj["policy_ids"]), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValues(obj *privateca.CertificateTemplatePredefinedValues) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "additional_extensions": flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(obj.AdditionalExtensions), + "aia_ocsp_servers": obj.AiaOcspServers, + "ca_options": flattenPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj.CaOptions), + "key_usage": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj.KeyUsage), + "policy_ids": flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(obj.PolicyIds), + } + + return []interface{}{transformed} + +} +func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(o interface{}) []privateca.CertificateTemplatePredefinedValuesAdditionalExtensions { + if o == nil { + return make([]privateca.CertificateTemplatePredefinedValuesAdditionalExtensions, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]privateca.CertificateTemplatePredefinedValuesAdditionalExtensions, 0) + } + + items := make([]privateca.CertificateTemplatePredefinedValuesAdditionalExtensions, 0, len(objs)) + for _, item := range objs { + i := expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(item) + items = append(items, *i) + } + + return items +} + +func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(o interface{}) *privateca.CertificateTemplatePredefinedValuesAdditionalExtensions { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesAdditionalExtensions + } + + obj := o.(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesAdditionalExtensions{ + ObjectId: expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(obj["object_id"]), + Value: dcl.String(obj["value"].(string)), + Critical: dcl.Bool(obj["critical"].(bool)), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(objs []privateca.CertificateTemplatePredefinedValuesAdditionalExtensions) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(&item) + items = append(items, i) + } + + return items +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(obj *privateca.CertificateTemplatePredefinedValuesAdditionalExtensions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "object_id": flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(obj.ObjectId), + "value": obj.Value, + "critical": obj.Critical, + } + + return transformed + +} + +func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(o interface{}) *privateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId + } + obj := objArr[0].(map[string]interface{}) + return 
&privateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId{ + ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(obj *privateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "object_id_path": obj.ObjectIdPath, + } + + return []interface{}{transformed} + +} + +func expandPrivatecaCertificateTemplatePredefinedValuesCaOptions(o interface{}) *privateca.CertificateTemplatePredefinedValuesCaOptions { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesCaOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePredefinedValuesCaOptions + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesCaOptions{ + IsCa: dcl.Bool(obj["is_ca"].(bool)), + MaxIssuerPathLength: dcl.Int64(int64(obj["max_issuer_path_length"].(int))), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj *privateca.CertificateTemplatePredefinedValuesCaOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "is_ca": obj.IsCa, + "max_issuer_path_length": obj.MaxIssuerPathLength, + } + + return []interface{}{transformed} + +} + +func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsage(o interface{}) *privateca.CertificateTemplatePredefinedValuesKeyUsage { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesKeyUsage + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePredefinedValuesKeyUsage + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesKeyUsage{ + BaseKeyUsage: expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(obj["base_key_usage"]), + ExtendedKeyUsage: expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(obj["extended_key_usage"]), + UnknownExtendedKeyUsages: expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(obj["unknown_extended_key_usages"]), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj *privateca.CertificateTemplatePredefinedValuesKeyUsage) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "base_key_usage": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(obj.BaseKeyUsage), + "extended_key_usage": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(obj.ExtendedKeyUsage), + "unknown_extended_key_usages": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(obj.UnknownExtendedKeyUsages), + } + + return []interface{}{transformed} + +} + +func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(o interface{}) *privateca.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage{ + CertSign: dcl.Bool(obj["cert_sign"].(bool)), + 
ContentCommitment: dcl.Bool(obj["content_commitment"].(bool)), + CrlSign: dcl.Bool(obj["crl_sign"].(bool)), + DataEncipherment: dcl.Bool(obj["data_encipherment"].(bool)), + DecipherOnly: dcl.Bool(obj["decipher_only"].(bool)), + DigitalSignature: dcl.Bool(obj["digital_signature"].(bool)), + EncipherOnly: dcl.Bool(obj["encipher_only"].(bool)), + KeyAgreement: dcl.Bool(obj["key_agreement"].(bool)), + KeyEncipherment: dcl.Bool(obj["key_encipherment"].(bool)), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(obj *privateca.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "cert_sign": obj.CertSign, + "content_commitment": obj.ContentCommitment, + "crl_sign": obj.CrlSign, + "data_encipherment": obj.DataEncipherment, + "decipher_only": obj.DecipherOnly, + "digital_signature": obj.DigitalSignature, + "encipher_only": obj.EncipherOnly, + "key_agreement": obj.KeyAgreement, + "key_encipherment": obj.KeyEncipherment, + } + + return []interface{}{transformed} + +} + +func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(o interface{}) *privateca.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return privateca.EmptyCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage + } + obj := objArr[0].(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage{ + ClientAuth: dcl.Bool(obj["client_auth"].(bool)), + CodeSigning: dcl.Bool(obj["code_signing"].(bool)), + EmailProtection: dcl.Bool(obj["email_protection"].(bool)), + OcspSigning: dcl.Bool(obj["ocsp_signing"].(bool)), + ServerAuth: dcl.Bool(obj["server_auth"].(bool)), + TimeStamping: dcl.Bool(obj["time_stamping"].(bool)), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(obj *privateca.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "client_auth": obj.ClientAuth, + "code_signing": obj.CodeSigning, + "email_protection": obj.EmailProtection, + "ocsp_signing": obj.OcspSigning, + "server_auth": obj.ServerAuth, + "time_stamping": obj.TimeStamping, + } + + return []interface{}{transformed} + +} +func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(o interface{}) []privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages { + if o == nil { + return make([]privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages, 0) + } + + items := make([]privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages, 0, len(objs)) + for _, item := range objs { + i := expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(item) + items = append(items, *i) + } + + return items +} + +func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(o interface{}) *privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages { + if o == nil { + return 
privateca.EmptyCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages + } + + obj := o.(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages{ + ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(objs []privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(&item) + items = append(items, i) + } + + return items +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(obj *privateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "object_id_path": obj.ObjectIdPath, + } + + return transformed + +} +func expandPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(o interface{}) []privateca.CertificateTemplatePredefinedValuesPolicyIds { + if o == nil { + return make([]privateca.CertificateTemplatePredefinedValuesPolicyIds, 0) + } + + objs := o.([]interface{}) + if len(objs) == 0 { + return make([]privateca.CertificateTemplatePredefinedValuesPolicyIds, 0) + } + + items := make([]privateca.CertificateTemplatePredefinedValuesPolicyIds, 0, len(objs)) + for _, item := range objs { + i := expandPrivatecaCertificateTemplatePredefinedValuesPolicyIds(item) + items = append(items, *i) + } + + return items +} + +func expandPrivatecaCertificateTemplatePredefinedValuesPolicyIds(o interface{}) *privateca.CertificateTemplatePredefinedValuesPolicyIds { + if o == nil { + return privateca.EmptyCertificateTemplatePredefinedValuesPolicyIds + } + + obj := o.(map[string]interface{}) + return &privateca.CertificateTemplatePredefinedValuesPolicyIds{ + ObjectIdPath: expandIntegerArray(obj["object_id_path"]), + } +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(objs []privateca.CertificateTemplatePredefinedValuesPolicyIds) []interface{} { + if objs == nil { + return nil + } + + items := []interface{}{} + for _, item := range objs { + i := flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIds(&item) + items = append(items, i) + } + + return items +} + +func flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIds(obj *privateca.CertificateTemplatePredefinedValuesPolicyIds) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "object_id_path": obj.ObjectIdPath, + } + + return transformed + +} +func flattenPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(obj []privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum) interface{} { + if obj == nil { + return nil + } + items := []string{} + for _, item := range obj { + items = append(items, string(item)) + } + return items +} + +func expandPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(o interface{}) []privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum { + objs := o.([]interface{}) + items := make([]privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum, 0, len(objs)) + for _, item := range objs { + i := privateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnumRef(item.(string)) + items = append(items, *i) + } + 
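+ // items now holds dereferenced enum values; this assumes EnumRef returns a
+ // non-nil pointer for every known_extensions string the schema can supply.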
return items +} diff --git a/google/resource_privateca_certificate_template_generated_test.go b/google/resource_privateca_certificate_template_generated_test.go new file mode 100644 index 00000000000..2346d9cdd1a --- /dev/null +++ b/google/resource_privateca_certificate_template_generated_test.go @@ -0,0 +1,273 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccPrivatecaCertificateTemplate_BasicCertificateTemplate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "region": getTestRegionFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPrivatecaCertificateTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPrivatecaCertificateTemplate_BasicCertificateTemplate(context), + }, + { + ResourceName: "google_privateca_certificate_template.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"predefined_values.0.key_usage.0.extended_key_usage"}, + }, + { + Config: testAccPrivatecaCertificateTemplate_BasicCertificateTemplateUpdate0(context), + }, + { + ResourceName: "google_privateca_certificate_template.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"predefined_values.0.key_usage.0.extended_key_usage"}, + }, + }, + }) +} + +func testAccPrivatecaCertificateTemplate_BasicCertificateTemplate(context map[string]interface{}) string { + return Nprintf(` +resource "google_privateca_certificate_template" "primary" { + location = "%{region}" + name = "tf-test-template%{random_suffix}" + description = "An updated sample certificate template" + + identity_constraints { + allow_subject_alt_names_passthrough = true + allow_subject_passthrough = true + + cel_expression { + description = "Always true" + expression = "true" + location = "any.file.anywhere" + title = "Sample expression" + } + } + + labels = { + label-two = "value-two" + } + + passthrough_extensions { + additional_extensions { + object_id_path = [1, 6] + } + + known_extensions = ["EXTENDED_KEY_USAGE"] + } + + predefined_values { + additional_extensions { + object_id { + object_id_path = [1, 6] + } + + value = "c3RyaW5nCg==" + critical = true + } + + aia_ocsp_servers = ["string"] + + ca_options { + is_ca = false + 
max_issuer_path_length = 6 + } + + key_usage { + base_key_usage { + cert_sign = false + content_commitment = true + crl_sign = false + data_encipherment = true + decipher_only = true + digital_signature = true + encipher_only = true + key_agreement = true + key_encipherment = true + } + + extended_key_usage { + client_auth = true + code_signing = true + email_protection = true + ocsp_signing = true + server_auth = true + time_stamping = true + } + + unknown_extended_key_usages { + object_id_path = [1, 6] + } + } + + policy_ids { + object_id_path = [1, 6] + } + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccPrivatecaCertificateTemplate_BasicCertificateTemplateUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_privateca_certificate_template" "primary" { + location = "%{region}" + name = "tf-test-template%{random_suffix}" + description = "A sample certificate template" + + identity_constraints { + allow_subject_alt_names_passthrough = false + allow_subject_passthrough = false + + cel_expression { + description = "Always false" + expression = "false" + location = "update.certificate_template.json" + title = "New sample expression" + } + } + + labels = { + label-one = "value-one" + } + + passthrough_extensions { + additional_extensions { + object_id_path = [1, 7] + } + + known_extensions = ["BASE_KEY_USAGE"] + } + + predefined_values { + additional_extensions { + object_id { + object_id_path = [1, 7] + } + + value = "bmV3LXN0cmluZw==" + critical = false + } + + aia_ocsp_servers = ["new-string"] + + ca_options { + is_ca = true + max_issuer_path_length = 7 + } + + key_usage { + base_key_usage { + cert_sign = true + content_commitment = false + crl_sign = true + data_encipherment = false + decipher_only = false + digital_signature = false + encipher_only = false + key_agreement = false + key_encipherment = false + } + + extended_key_usage { + client_auth = false + code_signing = false + email_protection = false + ocsp_signing = false + server_auth = false + time_stamping = false + } + + unknown_extended_key_usages { + object_id_path = [1, 7] + } + } + + policy_ids { + object_id_path = [1, 7] + } + } + + project = "%{project_name}" +} + + +`, context) +} + +func testAccCheckPrivatecaCertificateTemplateDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_privateca_certificate_template" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &privateca.CertificateTemplate{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Description: dcl.String(rs.Primary.Attributes["description"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := NewDCLPrivatecaClient(config, config.userAgent, billingProject, 0) + _, err := client.GetCertificateTemplate(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_privateca_certificate_template still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_privateca_certificate_template_sweeper_test.go
b/google/resource_privateca_certificate_template_sweeper_test.go new file mode 100644 index 00000000000..5e63170cf39 --- /dev/null +++ b/google/resource_privateca_certificate_template_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + privateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("PrivatecaCertificateTemplate", &resource.Sweeper{ + Name: "PrivatecaCertificateTemplate", + F: testSweepPrivatecaCertificateTemplate, + }) +} + +func testSweepPrivatecaCertificateTemplate(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for PrivatecaCertificateTemplate") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLPrivatecaClient(config, config.userAgent, "", 0) + err = client.DeleteAllCertificateTemplate(context.Background(), d["project"], d["location"], isDeletablePrivatecaCertificateTemplate) + if err != nil { + return err + } + return nil +} + +func isDeletablePrivatecaCertificateTemplate(r *privateca.CertificateTemplate) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/resource_recaptcha_enterprise_key.go b/google/resource_recaptcha_enterprise_key.go new file mode 100644 index 00000000000..671c4436c9b --- /dev/null +++ b/google/resource_recaptcha_enterprise_key.go @@ -0,0 +1,582 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. 
If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" +) + +func resourceRecaptchaEnterpriseKey() *schema.Resource { + return &schema.Resource{ + Create: resourceRecaptchaEnterpriseKeyCreate, + Read: resourceRecaptchaEnterpriseKeyRead, + Update: resourceRecaptchaEnterpriseKeyUpdate, + Delete: resourceRecaptchaEnterpriseKeyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRecaptchaEnterpriseKeyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: "Human-readable display name of this key. Modifiable by user.", + }, + + "android_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by Android apps.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyAndroidSettingsSchema(), + ConflictsWith: []string{"web_settings", "ios_settings"}, + }, + + "ios_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by iOS apps.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyIosSettingsSchema(), + ConflictsWith: []string{"web_settings", "android_settings"}, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: "See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "testing_options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Options for user acceptance testing.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyTestingOptionsSchema(), + }, + + "web_settings": { + Type: schema.TypeList, + Optional: true, + Description: "Settings for keys that can be used by websites.", + MaxItems: 1, + Elem: RecaptchaEnterpriseKeyWebSettingsSchema(), + ConflictsWith: []string{"android_settings", "ios_settings"}, + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "The timestamp corresponding to the creation of this Key.", + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The resource name for the Key in the format \"projects/{project}/keys/{key}\".", + }, + }, + } +} + +func RecaptchaEnterpriseKeyAndroidSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all_package_names": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_package_names will not be enforced.", + }, + + "allowed_package_names": { + Type: schema.TypeList, + Optional: true, + Description: "Android package names of apps allowed to use the key. 
Example: 'com.companyname.appname'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func RecaptchaEnterpriseKeyIosSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow_all_bundle_ids": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_bundle_ids will not be enforced.", + }, + + "allowed_bundle_ids": { + Type: schema.TypeList, + Optional: true, + Description: "iOS bundle ids of apps allowed to use the key. Example: 'com.companyname.productname.appname'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func RecaptchaEnterpriseKeyTestingOptionsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "testing_challenge": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE", + }, + + "testing_score": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Description: "All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive.", + }, + }, + } +} + +func RecaptchaEnterpriseKeyWebSettingsSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "integration_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Describes how this key is integrated with the website. Possible values: SCORE, CHECKBOX, INVISIBLE", + }, + + "allow_all_domains": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, it means allowed_domains will not be enforced.", + }, + + "allow_amp_traffic": { + Type: schema.TypeBool, + Optional: true, + Description: "If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type.", + }, + + "allowed_domains": { + Type: schema.TypeList, + Optional: true, + Description: "Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "challenge_security_preference": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: "Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. 
Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY", + }, + }, + } +} + +func resourceRecaptchaEnterpriseKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &recaptchaenterprise.Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + } + + id, err := replaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") + if err != nil { + return fmt.Errorf("error constructing id: %s", err) + } + d.SetId(id) + createDirective := CreateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutCreate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, createDirective...) + + if _, ok := err.(dcl.DiffAfterApplyError); ok { + log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) + } else if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error creating Key: %s", err) + } + + log.Printf("[DEBUG] Finished creating Key %q: %#v", d.Id(), res) + + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + // Id has a server-generated value, set again after creation + id, err = replaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return resourceRecaptchaEnterpriseKeyRead(d, meta) +} + +func resourceRecaptchaEnterpriseKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &recaptchaenterprise.Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + billingProject := project + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, 
d.Timeout(schema.TimeoutRead)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.GetKey(context.Background(), obj) + if err != nil { + resourceName := fmt.Sprintf("RecaptchaEnterpriseKey %q", d.Id()) + return handleNotFoundDCLError(err, d, resourceName) + } + + if err = d.Set("display_name", res.DisplayName); err != nil { + return fmt.Errorf("error setting display_name in state: %s", err) + } + if err = d.Set("android_settings", flattenRecaptchaEnterpriseKeyAndroidSettings(res.AndroidSettings)); err != nil { + return fmt.Errorf("error setting android_settings in state: %s", err) + } + if err = d.Set("ios_settings", flattenRecaptchaEnterpriseKeyIosSettings(res.IosSettings)); err != nil { + return fmt.Errorf("error setting ios_settings in state: %s", err) + } + if err = d.Set("labels", res.Labels); err != nil { + return fmt.Errorf("error setting labels in state: %s", err) + } + if err = d.Set("project", res.Project); err != nil { + return fmt.Errorf("error setting project in state: %s", err) + } + if err = d.Set("testing_options", flattenRecaptchaEnterpriseKeyTestingOptions(res.TestingOptions)); err != nil { + return fmt.Errorf("error setting testing_options in state: %s", err) + } + if err = d.Set("web_settings", flattenRecaptchaEnterpriseKeyWebSettings(res.WebSettings)); err != nil { + return fmt.Errorf("error setting web_settings in state: %s", err) + } + if err = d.Set("create_time", res.CreateTime); err != nil { + return fmt.Errorf("error setting create_time in state: %s", err) + } + if err = d.Set("name", res.Name); err != nil { + return fmt.Errorf("error setting name in state: %s", err) + } + + return nil +} +func resourceRecaptchaEnterpriseKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := &recaptchaenterprise.Key{ + DisplayName: dcl.String(d.Get("display_name").(string)), + AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")), + IosSettings: expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")), + Labels: checkStringMap(d.Get("labels")), + Project: dcl.String(project), + TestingOptions: expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")), + WebSettings: expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")), + Name: dcl.StringOrNil(d.Get("name").(string)), + } + directive := UpdateDirective + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutUpdate)) + if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil { + d.SetId("") + return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err) + } else { + client.Config.BasePath = bp + } + res, err := client.ApplyKey(context.Background(), obj, directive...) 
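+
+  // Note: dcl.DiffAfterApplyError (handled below) means the Apply call itself
+  // succeeded but a diff remained after applying; it is logged at DEBUG and
+  // the update is otherwise treated as successful.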
+  if _, ok := err.(dcl.DiffAfterApplyError); ok {
+    log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err)
+  } else if err != nil {
+    // The resource didn't actually update
+    d.SetId("")
+    return fmt.Errorf("Error updating Key: %s", err)
+  }
+
+  log.Printf("[DEBUG] Finished updating Key %q: %#v", d.Id(), res)
+
+  return resourceRecaptchaEnterpriseKeyRead(d, meta)
+}
+
+func resourceRecaptchaEnterpriseKeyDelete(d *schema.ResourceData, meta interface{}) error {
+  config := meta.(*Config)
+  project, err := getProject(d, config)
+  if err != nil {
+    return err
+  }
+
+  obj := &recaptchaenterprise.Key{
+    DisplayName:     dcl.String(d.Get("display_name").(string)),
+    AndroidSettings: expandRecaptchaEnterpriseKeyAndroidSettings(d.Get("android_settings")),
+    IosSettings:     expandRecaptchaEnterpriseKeyIosSettings(d.Get("ios_settings")),
+    Labels:          checkStringMap(d.Get("labels")),
+    Project:         dcl.String(project),
+    TestingOptions:  expandRecaptchaEnterpriseKeyTestingOptions(d.Get("testing_options")),
+    WebSettings:     expandRecaptchaEnterpriseKeyWebSettings(d.Get("web_settings")),
+    Name:            dcl.StringOrNil(d.Get("name").(string)),
+  }
+
+  log.Printf("[DEBUG] Deleting Key %q", d.Id())
+  userAgent, err := generateUserAgentString(d, config.userAgent)
+  if err != nil {
+    return err
+  }
+  billingProject := project
+  // err == nil indicates that the billing_project value was found
+  if bp, err := getBillingProject(d, config); err == nil {
+    billingProject = bp
+  }
+  client := NewDCLRecaptchaEnterpriseClient(config, userAgent, billingProject, d.Timeout(schema.TimeoutDelete))
+  if bp, err := replaceVars(d, config, client.Config.BasePath); err != nil {
+    d.SetId("")
+    return fmt.Errorf("Could not format %q: %w", client.Config.BasePath, err)
+  } else {
+    client.Config.BasePath = bp
+  }
+  if err := client.DeleteKey(context.Background(), obj); err != nil {
+    return fmt.Errorf("Error deleting Key: %s", err)
+  }
+
+  log.Printf("[DEBUG] Finished deleting Key %q", d.Id())
+  return nil
+}
+
+func resourceRecaptchaEnterpriseKeyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+  config := meta.(*Config)
+  if err := parseImportId([]string{
+    "projects/(?P<project>[^/]+)/keys/(?P<name>[^/]+)",
+    "(?P<project>[^/]+)/(?P<name>[^/]+)",
+    "(?P<name>[^/]+)",
+  }, d, config); err != nil {
+    return nil, err
+  }
+
+  // Replace import id for the resource id
+  id, err := replaceVarsForId(d, config, "projects/{{project}}/keys/{{name}}")
+  if err != nil {
+    return nil, fmt.Errorf("Error constructing id: %s", err)
+  }
+  d.SetId(id)
+
+  return []*schema.ResourceData{d}, nil
+}
+
+func expandRecaptchaEnterpriseKeyAndroidSettings(o interface{}) *recaptchaenterprise.KeyAndroidSettings {
+  if o == nil {
+    return recaptchaenterprise.EmptyKeyAndroidSettings
+  }
+  objArr := o.([]interface{})
+  if len(objArr) == 0 {
+    return recaptchaenterprise.EmptyKeyAndroidSettings
+  }
+  obj := objArr[0].(map[string]interface{})
+  return &recaptchaenterprise.KeyAndroidSettings{
+    AllowAllPackageNames: dcl.Bool(obj["allow_all_package_names"].(bool)),
+    AllowedPackageNames:  expandStringArray(obj["allowed_package_names"]),
+  }
+}
+
+func flattenRecaptchaEnterpriseKeyAndroidSettings(obj *recaptchaenterprise.KeyAndroidSettings) interface{} {
+  if obj == nil || obj.Empty() {
+    return nil
+  }
+  transformed := map[string]interface{}{
+    "allow_all_package_names": obj.AllowAllPackageNames,
+    "allowed_package_names":   obj.AllowedPackageNames,
+  }
+
+  return []interface{}{transformed}
+}
+
+func expandRecaptchaEnterpriseKeyIosSettings(o interface{})
*recaptchaenterprise.KeyIosSettings { + if o == nil { + return recaptchaenterprise.EmptyKeyIosSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return recaptchaenterprise.EmptyKeyIosSettings + } + obj := objArr[0].(map[string]interface{}) + return &recaptchaenterprise.KeyIosSettings{ + AllowAllBundleIds: dcl.Bool(obj["allow_all_bundle_ids"].(bool)), + AllowedBundleIds: expandStringArray(obj["allowed_bundle_ids"]), + } +} + +func flattenRecaptchaEnterpriseKeyIosSettings(obj *recaptchaenterprise.KeyIosSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "allow_all_bundle_ids": obj.AllowAllBundleIds, + "allowed_bundle_ids": obj.AllowedBundleIds, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyTestingOptions(o interface{}) *recaptchaenterprise.KeyTestingOptions { + if o == nil { + return recaptchaenterprise.EmptyKeyTestingOptions + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return recaptchaenterprise.EmptyKeyTestingOptions + } + obj := objArr[0].(map[string]interface{}) + return &recaptchaenterprise.KeyTestingOptions{ + TestingChallenge: recaptchaenterprise.KeyTestingOptionsTestingChallengeEnumRef(obj["testing_challenge"].(string)), + TestingScore: dcl.Float64(obj["testing_score"].(float64)), + } +} + +func flattenRecaptchaEnterpriseKeyTestingOptions(obj *recaptchaenterprise.KeyTestingOptions) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "testing_challenge": obj.TestingChallenge, + "testing_score": obj.TestingScore, + } + + return []interface{}{transformed} + +} + +func expandRecaptchaEnterpriseKeyWebSettings(o interface{}) *recaptchaenterprise.KeyWebSettings { + if o == nil { + return recaptchaenterprise.EmptyKeyWebSettings + } + objArr := o.([]interface{}) + if len(objArr) == 0 { + return recaptchaenterprise.EmptyKeyWebSettings + } + obj := objArr[0].(map[string]interface{}) + return &recaptchaenterprise.KeyWebSettings{ + IntegrationType: recaptchaenterprise.KeyWebSettingsIntegrationTypeEnumRef(obj["integration_type"].(string)), + AllowAllDomains: dcl.Bool(obj["allow_all_domains"].(bool)), + AllowAmpTraffic: dcl.Bool(obj["allow_amp_traffic"].(bool)), + AllowedDomains: expandStringArray(obj["allowed_domains"]), + ChallengeSecurityPreference: recaptchaenterprise.KeyWebSettingsChallengeSecurityPreferenceEnumRef(obj["challenge_security_preference"].(string)), + } +} + +func flattenRecaptchaEnterpriseKeyWebSettings(obj *recaptchaenterprise.KeyWebSettings) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "integration_type": obj.IntegrationType, + "allow_all_domains": obj.AllowAllDomains, + "allow_amp_traffic": obj.AllowAmpTraffic, + "allowed_domains": obj.AllowedDomains, + "challenge_security_preference": obj.ChallengeSecurityPreference, + } + + return []interface{}{transformed} + +} diff --git a/google/resource_recaptcha_enterprise_key_generated_test.go b/google/resource_recaptcha_enterprise_key_generated_test.go new file mode 100644 index 00000000000..100aa9d6e36 --- /dev/null +++ b/google/resource_recaptcha_enterprise_key_generated_test.go @@ -0,0 +1,441 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules 
(https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "fmt" + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "strings" + "testing" +) + +func TestAccRecaptchaEnterpriseKey_AndroidKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_AndroidKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRecaptchaEnterpriseKey_AndroidKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_IosKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_IosKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRecaptchaEnterpriseKey_IosKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_MinimalKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_MinimalKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WebKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccRecaptchaEnterpriseKey_WebKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRecaptchaEnterpriseKey_WebKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccRecaptchaEnterpriseKey_WebScoreKey(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": getTestProjectFromEnv(), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRecaptchaEnterpriseKey_WebScoreKey(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccRecaptchaEnterpriseKey_WebScoreKeyUpdate0(context), + }, + { + ResourceName: "google_recaptcha_enterprise_key.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccRecaptchaEnterpriseKey_AndroidKey(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + android_settings { + allow_all_package_names = true + allowed_package_names = [] + } + + labels = { + label-one = "value-one" + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.8 + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_AndroidKeyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + android_settings { + allow_all_package_names = false + allowed_package_names = ["com.android.application"] + } + + labels = { + label-two = "value-two" + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.8 + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_IosKey(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + ios_settings { + allow_all_bundle_ids = true + allowed_bundle_ids = [] + } + + labels = { + label-one = "value-one" + } + + project = "%{project_name}" + + testing_options { + testing_score = 1 + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_IosKeyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + ios_settings { + allow_all_bundle_ids = false + allowed_bundle_ids = ["com.companyname.appname"] + } + + labels = { + label-two = "value-two" + } + + project = "%{project_name}" + + testing_options { + testing_score = 1 + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_MinimalKey(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + labels = {} + project = "%{project_name}" + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebKey(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + labels = { + label-one = "value-one" + } + + 
project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebKeyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + labels = { + label-two = "value-two" + } + + project = "%{project_name}" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" + allow_all_domains = false + allowed_domains = ["subdomain.example.com"] + challenge_security_preference = "SECURITY" + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebScoreKey(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + labels = { + label-one = "value-one" + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + allow_amp_traffic = false + allowed_domains = [] + } +} + + +`, context) +} + +func testAccRecaptchaEnterpriseKey_WebScoreKeyUpdate0(context map[string]interface{}) string { + return Nprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-two" + + labels = { + label-two = "value-two" + } + + project = "%{project_name}" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = false + allow_amp_traffic = true + allowed_domains = ["subdomain.example.com"] + } +} + + +`, context) +} + +func testAccCheckRecaptchaEnterpriseKeyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_recaptcha_enterprise_key" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &recaptchaenterprise.Key{ + DisplayName: dcl.String(rs.Primary.Attributes["display_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Name: dcl.StringOrNil(rs.Primary.Attributes["name"]), + } + + client := NewDCLRecaptchaEnterpriseClient(config, config.userAgent, billingProject, 0) + _, err := client.GetKey(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_recaptcha_enterprise_key still exists %v", obj) + } + } + return nil + } +} diff --git a/google/resource_recaptcha_enterprise_key_sweeper_test.go b/google/resource_recaptcha_enterprise_key_sweeper_test.go new file mode 100644 index 00000000000..798d6ca3100 --- /dev/null +++ b/google/resource_recaptcha_enterprise_key_sweeper_test.go @@ -0,0 +1,71 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: DCL *** +// +// ---------------------------------------------------------------------------- +// +// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) +// and is based on the DCL 
(https://github.com/GoogleCloudPlatform/declarative-resource-client-library). +// Changes will need to be made to the DCL or Magic Modules instead of here. +// +// We are not currently able to accept contributions to this file. If changes +// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "testing" + + recaptchaenterprise "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/recaptchaenterprise" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("RecaptchaEnterpriseKey", &resource.Sweeper{ + Name: "RecaptchaEnterpriseKey", + F: testSweepRecaptchaEnterpriseKey, + }) +} + +func testSweepRecaptchaEnterpriseKey(region string) error { + log.Print("[INFO][SWEEPER_LOG] Starting sweeper for RecaptchaEnterpriseKey") + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to be used for Delete arguments. + d := map[string]string{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + } + + client := NewDCLRecaptchaEnterpriseClient(config, config.userAgent, "", 0) + err = client.DeleteAllKey(context.Background(), d["project"], isDeletableRecaptchaEnterpriseKey) + if err != nil { + return err + } + return nil +} + +func isDeletableRecaptchaEnterpriseKey(r *recaptchaenterprise.Key) bool { + return isSweepableTestResource(*r.Name) +} diff --git a/google/tpgtools_utils.go b/google/tpgtools_utils.go new file mode 100644 index 00000000000..cdcb287d27b --- /dev/null +++ b/google/tpgtools_utils.go @@ -0,0 +1,26 @@ +package google + +import ( + "fmt" + "log" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func oldValue(old, new interface{}) interface{} { + return old +} + +func handleNotFoundDCLError(err error, d *schema.ResourceData, resourceName string) error { + if dcl.IsNotFound(err) { + log.Printf("[WARN] Removing %s because it's gone", resourceName) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + return errwrap.Wrapf( + fmt.Sprintf("Error when reading or editing %s: {{err}}", resourceName), err) +} diff --git a/website/docs/r/apigee_instance.html.markdown b/website/docs/r/apigee_instance.html.markdown index a4f84da1cf7..dc5d36a9b27 100644 --- a/website/docs/r/apigee_instance.html.markdown +++ b/website/docs/r/apigee_instance.html.markdown @@ -106,6 +106,44 @@ resource "google_apigee_instance" "apigee_instance" { peering_cidr_range = "SLASH_22" } ``` +## Example Usage - Apigee Instance Ip Range + + +```hcl +data "google_client_config" "current" {} + +resource "google_compute_network" "apigee_network" { + name = "apigee-network" +} + +resource "google_compute_global_address" "apigee_range" { + name = "apigee-range" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 22 + network = google_compute_network.apigee_network.id +} + 
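+# The /22 reserved above is handed to the Service Networking peering below;
+# a customer-provided `ip_range` on the Apigee instance must come from a
+# range already allocated to that peering.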
+resource "google_service_networking_connection" "apigee_vpc_connection" { + network = google_compute_network.apigee_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.apigee_range.name] +} + +resource "google_apigee_organization" "apigee_org" { + analytics_region = "us-central1" + project_id = data.google_client_config.current.project + authorized_network = google_compute_network.apigee_network.id + depends_on = [google_service_networking_connection.apigee_vpc_connection] +} + +resource "google_apigee_instance" "apigee_instance" { + name = "tf-test%{random_suffix}" + location = "us-central1-b" + org_id = google_apigee_organization.apigee_org.id + ip_range = "10.87.8.0/22" +} +``` ## Example Usage - Apigee Instance Full @@ -212,6 +250,16 @@ The following arguments are supported: The size of the CIDR block range that will be reserved by the instance. For valid values, see [CidrRange](https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances#CidrRange) on the documentation. +* `ip_range` - + (Optional) + IP range represents the customer-provided CIDR block of length 22 that will be used for + the Apigee instance creation. This optional range, if provided, should be freely + available as part of larger named range the customer has allocated to the Service + Networking peering. If this is not provided, Apigee will automatically request for any + available /22 CIDR block from Service Networking. The customer should use this CIDR block + for configuring their firewall needs to allow traffic from Apigee. + Input format: "a.b.c.d/22" + * `description` - (Optional) Description of the instance. diff --git a/website/docs/r/assured_workloads_workload.html.markdown b/website/docs/r/assured_workloads_workload.html.markdown new file mode 100644 index 00000000000..907d31aa5cf --- /dev/null +++ b/website/docs/r/assured_workloads_workload.html.markdown @@ -0,0 +1,165 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. 
+# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose
+#
+# ----------------------------------------------------------------------------
+subcategory: "AssuredWorkloads"
+layout: "google"
+page_title: "Google: google_assured_workloads_workload"
+description: |-
+The AssuredWorkloads Workload resource
+---
+
+# google_assured_workloads_workload
+
+The AssuredWorkloads Workload resource
+
+## Example Usage - basic_workload
+A basic example of an assuredworkloads workload
+```hcl
+resource "google_assured_workloads_workload" "primary" {
+  billing_account   = "billingAccounts/000000-0000000-0000000-000000"
+  compliance_regime = "FEDRAMP_MODERATE"
+  display_name      = "Workload Example"
+  location          = "us-west1"
+  organization      = "123456789"
+
+  kms_settings {
+    next_rotation_time = "9999-10-02T15:01:23Z"
+    rotation_period    = "10368000s"
+  }
+
+  labels = {
+    label-one = "value-one"
+  }
+
+  provisioned_resources_parent = "folders/519620126891"
+
+  resource_settings {
+    resource_type = "CONSUMER_PROJECT"
+  }
+
+  resource_settings {
+    resource_type = "ENCRYPTION_KEYS_PROJECT"
+  }
+
+  resource_settings {
+    resource_id   = "ring"
+    resource_type = "KEYRING"
+  }
+}
+
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `billing_account` -
+  (Required)
+  Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`.
+
+* `compliance_regime` -
+  (Required)
+  Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS
+
+* `display_name` -
+  (Required)
+  Required. The user-assigned display name of the Workload. When present it must be between 4 and 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
+
+* `location` -
+  (Required)
+  The location for the resource
+
+* `organization` -
+  (Required)
+  The organization for the resource
+
+
+
+- - -
+
+* `kms_settings` -
+  (Optional)
+  Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is mandatory for a subset of Compliance Regimes.
+
+* `labels` -
+  (Optional)
+  Optional. Labels applied to the workload.
+
+* `provisioned_resources_parent` -
+  (Optional)
+  Input only. The parent resource for the resources managed by this Assured Workload. May be either an organization or a folder. Must be the same or a child of the Workload parent. If not specified, all resources are created under the Workload parent. Formats: folders/{folder_id}, organizations/{organization_id}
+
+* `resource_settings` -
+  (Optional)
+  Input only. Resource properties that are used to customize workload resources. These properties (such as a custom project id) will be used to create workload resources if possible. This field is optional.
+
+
+
+The `kms_settings` block supports:
+
+* `next_rotation_time` -
+  (Required)
+  Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.
+
+* `rotation_period` -
+  (Required)
+  Required. Input only. Immutable. `next_rotation_time` will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.
+
+The `resource_settings` block supports:
+
+* `resource_id` -
+  (Optional)
+  Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.
+
+* `resource_type` -
+  (Optional)
+  Indicates the type of resource. This field should be specified so that the id corresponds to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT). Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER
+
+## Attributes Reference
+
+In addition to the arguments listed above, the following computed attributes are exported:
+
+* `id` - an identifier for the resource with format `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}`
+
+* `create_time` -
+  Output only. Immutable. The Workload creation timestamp.
+
+* `name` -
+  Output only. The resource name of the workload.
+
+* `resources` -
+  Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only.
+
+## Timeouts
+
+This resource provides the following
+[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
+
+- `create` - Default is 10 minutes.
+- `update` - Default is 10 minutes.
+- `delete` - Default is 10 minutes.
+
+## Import
+
+Workload can be imported using any of these accepted formats:
+
+```
+$ terraform import google_assured_workloads_workload.default organizations/{{organization}}/locations/{{location}}/workloads/{{name}}
+$ terraform import google_assured_workloads_workload.default {{organization}}/{{location}}/{{name}}
+```
+
+
+
diff --git a/website/docs/r/container_aws_cluster.html.markdown b/website/docs/r/container_aws_cluster.html.markdown
new file mode 100644
index 00000000000..a505c4fc32e
--- /dev/null
+++ b/website/docs/r/container_aws_cluster.html.markdown
@@ -0,0 +1,382 @@
+---
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: DCL ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules)
+# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library).
+# Changes will need to be made to the DCL or Magic Modules instead of here.
+#
+# We are not currently able to accept contributions to this file. If changes
+# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose
+#
+# ----------------------------------------------------------------------------
+subcategory: "ContainerAws"
+layout: "google"
+page_title: "Google: google_container_aws_cluster"
+description: |-
+An Anthos cluster running on AWS.
+---
+
+# google_container_aws_cluster
+
+An Anthos cluster running on AWS.
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +## Example Usage - basic_aws_cluster +A basic example of a containeraws cluster +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "emailAddress:my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "emailAddress:my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `authorization` - + (Required) + Required. Configuration related to the cluster RBAC settings. + +* `aws_region` - + (Required) + Required. The AWS region where the cluster runs. Each Google Cloud region supports a subset of nearby AWS regions. You can call to list all supported AWS regions within a given Google Cloud region. + +* `control_plane` - + (Required) + Required. Configuration related to the cluster control plane. + +* `fleet` - + (Required) + Fleet configuration. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `networking` - + (Required) + Required. Cluster-wide networking configuration. + + + +The `authorization` block supports: + +* `admin_users` - + (Required) + Required. Users to perform operations as a cluster admin. A managed ClusterRoleBinding will be created to grant the `cluster-admin` ClusterRole to the users. At most one user can be specified. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +The `admin_users` block supports: + +* `username` - + (Required) + Required. The name of the user, e.g. `my-gcp-id@gmail.com`. 
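+
+For illustration only, a minimal `authorization` block granting `cluster-admin`
+to a single user (the email address below is a placeholder) might look like:
+
+```hcl
+authorization {
+  admin_users {
+    username = "cluster-admin@example.com"
+  }
+}
+```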
+
+The `control_plane` block supports:
+
+* `aws_services_authentication` -
+  (Required)
+  Required. Authentication configuration for management of AWS resources.
+
+* `config_encryption` -
+  (Required)
+  Required. The ARN of the AWS KMS key used to encrypt cluster configuration.
+
+* `database_encryption` -
+  (Required)
+  Required. The ARN of the AWS KMS key used to encrypt cluster secrets.
+
+* `iam_instance_profile` -
+  (Required)
+  Required. The name of the AWS IAM instance profile to assign to each control plane replica.
+
+* `instance_type` -
+  (Optional)
+  Optional. The AWS instance type. When unspecified, it defaults to `t3.medium`.
+
+* `main_volume` -
+  (Optional)
+  Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.
+
+* `proxy_config` -
+  (Optional)
+  Proxy configuration for outbound HTTP(S) traffic.
+
+* `root_volume` -
+  (Optional)
+  Optional. Configuration related to the root volume provisioned for each control plane replica. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.
+
+* `security_group_ids` -
+  (Optional)
+  Optional. The IDs of additional security groups to add to control plane replicas. The Anthos Multi-Cloud API will automatically create and manage security groups with the minimum rules needed for a functioning cluster.
+
+* `ssh_config` -
+  (Optional)
+  Optional. SSH configuration for how to access the underlying control plane machines.
+
+* `subnet_ids` -
+  (Required)
+  Required. The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).
+
+* `tags` -
+  (Optional)
+  Optional. A set of AWS resource tags to propagate to all underlying managed AWS resources. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.
+
+* `version` -
+  (Required)
+  Required. The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig.
+
+The `aws_services_authentication` block supports:
+
+* `role_arn` -
+  (Required)
+  Required. The Amazon Resource Name (ARN) of the role that the Anthos Multi-Cloud API will assume when managing AWS resources on your account.
+
+* `role_session_name` -
+  (Optional)
+  Optional. An identifier for the assumed role session. When unspecified, it defaults to `multicloud-service-agent`.
+
+The `config_encryption` block supports:
+
+* `kms_key_arn` -
+  (Required)
+  Required. The ARN of the AWS KMS key used to encrypt cluster configuration.
+
+The `database_encryption` block supports:
+
+* `kms_key_arn` -
+  (Required)
+  Required. The ARN of the AWS KMS key used to encrypt cluster secrets.
+
+The `fleet` block supports:
+
+* `membership` -
+  The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as `projects/<project-number>/locations/global/membership/<membership-id>`.
+
+* `project` -
+  (Optional)
+  The number of the Fleet host project where this cluster will be registered.
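+
+For example, a sketch of a `fleet` block; note that it takes the host project
+*number* (the value below is a placeholder), not the project ID:
+
+```hcl
+fleet {
+  project = "123456789012"
+}
+```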
+ +The `networking` block supports: + +* `pod_address_cidr_blocks` - + (Required) + Required. All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `service_address_cidr_blocks` - + (Required) + Required. All services in the cluster are assigned an RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `vpc_id` - + (Required) + Required. The VPC associated with the cluster. All component clusters (i.e. control plane and node pools) run on a single VPC. This field cannot be changed after creation. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +* `description` - + (Optional) + Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes. + +* `project` - + (Optional) + The project for the resource + + + +The `main_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `proxy_config` block supports: + +* `secret_arn` - + (Required) + The ARN of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +* `secret_version` - + (Required) + The version string of the AWS Secret Manager secret that contains the HTTP(S) proxy configuration. + +The `root_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. + +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `ssh_config` block supports: + +* `ec2_key_pair` - + (Required) + Required. The name of the EC2 key pair used to login into cluster machines. 
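+
+The computed attributes documented below can be referenced like any other
+attribute; for instance, a sketch exporting the cluster's API server endpoint:
+
+```hcl
+output "aws_cluster_endpoint" {
+  value = google_container_aws_cluster.primary.endpoint
+}
+```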
+ +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/awsClusters/{{name}}` + +* `create_time` - + Output only. The time at which this cluster was created. + +* `endpoint` - + Output only. The endpoint of the cluster's API server. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the cluster. + +* `state` - + Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the cluster. + +* `update_time` - + Output only. The time at which this cluster was last updated. + +* `workload_identity_config` - + Output only. Workload Identity settings. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +Cluster can be imported using any of these accepted formats: + +``` +$ terraform import google_container_aws_cluster.default projects/{{project}}/locations/{{location}}/awsClusters/{{name}} +$ terraform import google_container_aws_cluster.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_aws_cluster.default {{location}}/{{name}} +``` + + + diff --git a/website/docs/r/container_aws_node_pool.html.markdown b/website/docs/r/container_aws_node_pool.html.markdown new file mode 100644 index 00000000000..37c5dd3fee4 --- /dev/null +++ b/website/docs/r/container_aws_node_pool.html.markdown @@ -0,0 +1,373 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "ContainerAws" +layout: "google" +page_title: "Google: google_container_aws_node_pool" +description: |- +An Anthos node pool running on AWS. +--- + +# google_container_aws_node_pool + +An Anthos node pool running on AWS. 
+ +For more information, see: +* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +## Example Usage - basic_aws_cluster +A basic example of a containeraws node pool +```hcl +data "google_container_aws_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_aws_cluster" "primary" { + authorization { + admin_users { + username = "emailAddress:my@service-account.com" + } + } + + aws_region = "my-aws-region" + + control_plane { + aws_services_authentication { + role_arn = "arn:aws:iam::012345678910:role/my--1p-dev-oneplatform" + role_session_name = "my--1p-dev-session" + } + + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + database_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-controlplane" + subnet_ids = ["subnet-00000000000000000"] + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + instance_type = "t3.medium" + + main_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + proxy_config { + secret_arn = "arn:aws:secretsmanager:us-west-2:126285863215:secret:proxy_config20210824150329476300000001-ABCDEF" + secret_version = "12345678-ABCD-EFGH-IJKL-987654321098" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + owner = "emailAddress:my@service-account.com" + } + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.2.0.0/16"] + service_address_cidr_blocks = ["10.1.0.0/16"] + vpc_id = "vpc-00000000000000000" + } + + annotations = { + label-one = "value-one" + } + + description = "A sample aws cluster" + project = "my-project-name" +} + + +resource "google_container_aws_node_pool" "primary" { + autoscaling { + max_node_count = 5 + min_node_count = 1 + } + + cluster = google_container_aws_cluster.primary.name + + config { + config_encryption { + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + } + + iam_instance_profile = "my--1p-dev-nodepool" + instance_type = "t3.medium" + + labels = { + label-one = "value-one" + } + + root_volume { + iops = 3000 + kms_key_arn = "arn:aws:kms:my-aws-region:012345678910:key/12345678-1234-1234-1234-123456789111" + size_gib = 10 + volume_type = "GP3" + } + + security_group_ids = ["sg-00000000000000000"] + + ssh_config { + ec2_key_pair = "my--1p-dev-ssh" + } + + tags = { + tag-one = "value-one" + } + + taints { + effect = "PREFER_NO_SCHEDULE" + key = "taint-key" + value = "taint-value" + } + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "subnet-00000000000000000" + version = "${data.google_container_aws_versions.versions.valid_versions[0]}" + + annotations = { + label-one = "value-one" + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `autoscaling` - + (Required) + Required. Autoscaler configuration for this node pool. 
+ +* `cluster` - + (Required) + The awsCluster for the resource + +* `config` - + (Required) + Required. The configuration of the node pool. + +* `location` - + (Required) + The location for the resource + +* `max_pods_constraint` - + (Required) + Required. The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. + +* `name` - + (Required) + The name of this resource. + +* `subnet_id` - + (Required) + Required. The subnet where the node pool node run. + +* `version` - + (Required) + Required. The Kubernetes version to run on this node pool (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAwsServerConfig. + + + +The `autoscaling` block supports: + +* `max_node_count` - + (Required) + Required. Maximum number of nodes in the NodePool. Must be >= min_node_count. + +* `min_node_count` - + (Required) + Required. Minimum number of nodes in the NodePool. Must be >= 1 and <= max_node_count. + +The `config` block supports: + +* `config_encryption` - + (Required) + Required. The ARN of the AWS KMS key used to encrypt node pool configuration. + +* `iam_instance_profile` - + (Required) + Required. The name of the AWS IAM role assigned to nodes in the pool. + +* `instance_type` - + (Optional) + Optional. The AWS instance type. When unspecified, it defaults to `t3.medium`. + +* `labels` - + (Optional) + Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + +* `root_volume` - + (Optional) + Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type. + +* `security_group_ids` - + (Optional) + Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster. + +* `ssh_config` - + (Optional) + Optional. The SSH configuration. + +* `tags` - + (Optional) + Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `taints` - + (Optional) + Optional. The initial taints assigned to nodes of this node pool. + +The `config_encryption` block supports: + +* `kms_key_arn` - + (Required) + Required. The ARN of the AWS KMS key used to encrypt node pool configuration. + +The `max_pods_constraint` block supports: + +* `max_pods_per_node` - + (Required) + Required. The maximum number of pods to schedule on a single node. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Key can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +* `project` - + (Optional) + The project for the resource + + + +The `root_volume` block supports: + +* `iops` - + (Optional) + Optional. The number of I/O operations per second (IOPS) to provision for GP3 volume. 
+ +* `kms_key_arn` - + (Optional) + Optional. The Amazon Resource Name (ARN) of the Customer Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the default Amazon managed key associated to the AWS region where this cluster runs will be used. + +* `size_gib` - + (Optional) + Optional. The size of the volume, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +* `volume_type` - + (Optional) + Optional. Type of the EBS volume. When unspecified, it defaults to GP2 volume. Possible values: VOLUME_TYPE_UNSPECIFIED, GP2, GP3 + +The `ssh_config` block supports: + +* `ec2_key_pair` - + (Required) + Required. The name of the EC2 key pair used to login into cluster machines. + +The `taints` block supports: + +* `effect` - + (Required) + Required. The taint effect. Possible values: EFFECT_UNSPECIFIED, NO_SCHEDULE, PREFER_NO_SCHEDULE, NO_EXECUTE + +* `key` - + (Required) + Required. Key for the taint. + +* `value` - + (Required) + Required. Value for the taint. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}}` + +* `create_time` - + Output only. The time at which this node pool was created. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently changes in flight to the node pool. + +* `state` - + Output only. The lifecycle state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the node pool. + +* `update_time` - + Output only. The time at which this node pool was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +NodePool can be imported using any of these accepted formats: + +``` +$ terraform import google_container_aws_node_pool.default projects/{{project}}/locations/{{location}}/awsClusters/{{cluster}}/awsNodePools/{{name}} +$ terraform import google_container_aws_node_pool.default {{project}}/{{location}}/{{cluster}}/{{name}} +$ terraform import google_container_aws_node_pool.default {{location}}/{{cluster}}/{{name}} +``` + + + diff --git a/website/docs/r/container_azure_client.html.markdown b/website/docs/r/container_azure_client.html.markdown new file mode 100644 index 00000000000..1337f4ec903 --- /dev/null +++ b/website/docs/r/container_azure_client.html.markdown @@ -0,0 +1,106 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. 
If changes
# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose
#
# ----------------------------------------------------------------------------
subcategory: "ContainerAzure"
layout: "google"
page_title: "Google: google_container_azure_client"
description: |-
AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription. When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated with that cluster will authenticate to Azure services using the given client. AzureClient resources are immutable and cannot be modified upon creation. Each AzureClient resource is bound to a single Azure Active Directory Application and tenant.
---

# google_container_azure_client

AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription. When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated with that cluster will authenticate to Azure services using the given client. AzureClient resources are immutable and cannot be modified upon creation. Each AzureClient resource is bound to a single Azure Active Directory Application and tenant.

For more information, see:
* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud)
## Example Usage - basic_azure_client
A basic example of a containerazure azure client
```hcl
resource "google_container_azure_client" "primary" {
  application_id = "12345678-1234-1234-1234-123456789111"
  location       = "us-west1"
  name           = "client-name"
  tenant_id      = "12345678-1234-1234-1234-123456789111"
  project        = "my-project-name"
}

```

## Argument Reference

The following arguments are supported:

* `application_id` -
  (Required)
  Required. The Azure Active Directory Application ID.

* `location` -
  (Required)
  The location for the resource

* `name` -
  (Required)
  The name of this resource.

* `tenant_id` -
  (Required)
  Required. The Azure Active Directory Tenant ID.



- - -

* `project` -
  (Optional)
  The project for the resource



## Attributes Reference

In addition to the arguments listed above, the following computed attributes are exported:

* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClients/{{name}}`

* `certificate` -
  Output only. The PEM encoded x509 certificate.

* `create_time` -
  Output only. The time at which this resource was created.

* `uid` -
  Output only. A globally unique identifier for the client.

## Timeouts

This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:

- `create` - Default is 10 minutes.
- `delete` - Default is 10 minutes.
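These defaults can be raised with a standard Terraform `timeouts` block inside the resource. A minimal sketch, reusing the example configuration above (the `20m` values are illustrative, not defaults):

```hcl
resource "google_container_azure_client" "primary" {
  application_id = "12345678-1234-1234-1234-123456789111"
  location       = "us-west1"
  name           = "client-name"
  tenant_id      = "12345678-1234-1234-1234-123456789111"
  project        = "my-project-name"

  # Override the operation timeouts; any Go-style duration string is accepted.
  timeouts {
    create = "20m"
    delete = "20m"
  }
}
```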
+ +## Import + +Client can be imported using any of these accepted formats: + +``` +$ terraform import google_container_azure_client.default projects/{{project}}/locations/{{location}}/azureClients/{{name}} +$ terraform import google_container_azure_client.default {{project}}/{{location}}/{{name}} +$ terraform import google_container_azure_client.default {{location}}/{{name}} +``` + + + diff --git a/website/docs/r/container_azure_cluster.html.markdown b/website/docs/r/container_azure_cluster.html.markdown new file mode 100644 index 00000000000..0989b9dc8d9 --- /dev/null +++ b/website/docs/r/container_azure_cluster.html.markdown @@ -0,0 +1,313 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "ContainerAzure" +layout: "google" +page_title: "Google: google_container_azure_cluster" +description: |- +An Anthos cluster running on Azure. +--- + +# google_container_azure_cluster + +An Anthos cluster running on Azure. + +For more information, see: +* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +## Example Usage - basic_azure_cluster +A basic example of a containerazure azure cluster +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = 
"/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" +} + +resource "google_container_azure_client" "basic" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `authorization` - + (Required) + Required. Configuration related to the cluster RBAC settings. + +* `azure_region` - + (Required) + Required. The Azure region where the cluster runs. Each Google Cloud region supports a subset of nearby Azure regions. You can call to list all supported Azure regions within a given Google Cloud region. + +* `client` - + (Required) + Required. Name of the AzureClient. The `AzureClient` resource must reside on the same GCP project and region as the `AzureCluster`. `AzureClient` names are formatted as `projects//locations//azureClients/`. See Resource Names (https:cloud.google.com/apis/design/resource_names) for more details on Google Cloud resource names. + +* `control_plane` - + (Required) + Required. Configuration related to the cluster control plane. + +* `fleet` - + (Required) + Fleet configuration. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The name of this resource. + +* `networking` - + (Required) + Required. Cluster-wide networking configuration. + +* `resource_group_id` - + (Required) + Required. The ARM ID of the resource group where the cluster resources are deployed. For example: `/subscriptions/*/resourceGroups/*` + + + +The `authorization` block supports: + +* `admin_users` - + (Required) + Required. Users that can perform operations as a cluster admin. A new ClusterRoleBinding will be created to grant the cluster-admin ClusterRole to the users. At most one user can be specified. For more info on RBAC, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + +The `admin_users` block supports: + +* `username` - + (Required) + Required. The name of the user, e.g. `my-gcp-id@gmail.com`. + +The `control_plane` block supports: + +* `database_encryption` - + (Optional) + Optional. Configuration related to application-layer secrets encryption. + +* `main_volume` - + (Optional) + Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. When unspecified, it defaults to a 8-GiB Azure Disk. + +* `proxy_config` - + (Optional) + Proxy configuration for outbound HTTP(S) traffic. + +* `replica_placements` - + (Optional) + Configuration for where to place the control plane replicas. Up to three replica placement instances can be specified. If replica_placements is set, the replica placement instances will be applied to the three control plane replicas as evenly as possible. + +* `root_volume` - + (Optional) + Optional. Configuration related to the root volume provisioned for each control plane replica. When unspecified, it defaults to 32-GiB Azure Disk. + +* `ssh_config` - + (Required) + Required. SSH configuration for how to access the underlying control plane machines. + +* `subnet_id` - + (Required) + Required. 
The ARM ID of the subnet where the control plane VMs are deployed. Example: `/subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/default`. + +* `tags` - + (Optional) + Optional. A set of tags to apply to all underlying control plane Azure resources. + +* `version` - + (Required) + Required. The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling GetAzureServerConfig. + +* `vm_size` - + (Optional) + Optional. The Azure VM size name. Example: `Standard_DS2_v2`. For available VM sizes, see https://docs.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions. When unspecified, it defaults to `Standard_DS2_v2`. + +The `ssh_config` block supports: + +* `authorized_key` - + (Required) + Required. The SSH public key data for VMs managed by Anthos. This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page. + +The `fleet` block supports: + +* `membership` - + The name of the managed Hub Membership resource associated to this cluster. Membership names are formatted as projects//locations/global/membership/. + +* `project` - + (Optional) + The number of the Fleet host project where this cluster will be registered. + +The `networking` block supports: + +* `pod_address_cidr_blocks` - + (Required) + Required. The IP address range of the pods in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All pods in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creation. + +* `service_address_cidr_blocks` - + (Required) + Required. The IP address range for services in this cluster, in CIDR notation (e.g. `10.96.0.0/14`). All services in the cluster get assigned a unique RFC1918 IPv4 address from these ranges. Only a single range is supported. This field cannot be changed after creating a cluster. + +* `virtual_network_id` - + (Required) + Required. The Azure Resource Manager (ARM) ID of the VNet associated with your cluster. All components in the cluster (i.e. control plane and node pools) run on a single VNet. Example: `/subscriptions/*/resourceGroups/*/providers/Microsoft.Network/virtualNetworks/*` This field cannot be changed after creation. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the cluster. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +* `description` - + (Optional) + Optional. A human readable description of this cluster. Cannot be longer than 255 UTF-8 encoded bytes. + +* `project` - + (Optional) + The project for the resource + + + +The `database_encryption` block supports: + +* `key_id` - + (Required) + The ARM ID of the Azure Key Vault key to encrypt / decrypt data. For example: `/subscriptions//resourceGroups//providers/Microsoft.KeyVault/vaults//keys/` Encryption will always take the latest version of the key and hence specific version is not supported. + +The `main_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. 
See the specific reference in the parent resource.

The `proxy_config` block supports:

* `resource_group_id` -
  (Required)
  The ARM ID of the resource group containing the proxy keyvault. Resource group ids are formatted as `/subscriptions//resourceGroups/`

* `secret_id` -
  (Required)
  The URL of the proxy setting secret with its version. Secret ids are formatted as `https:.vault.azure.net/secrets//`.

The `replica_placements` block supports:

* `azure_availability_zone` -
  (Required)
  For a given replica, the Azure availability zone in which to provision the control plane VM and the ETCD disk.

* `subnet_id` -
  (Required)
  For a given replica, the ARM ID of the subnet where the control plane VM is deployed. Make sure it's a subnet under the virtual network in the cluster configuration.

The `root_volume` block supports:

* `size_gib` -
  (Optional)
  Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource.

## Attributes Reference

In addition to the arguments listed above, the following computed attributes are exported:

* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClusters/{{name}}`

* `create_time` -
  Output only. The time at which this cluster was created.

* `endpoint` -
  Output only. The endpoint of the cluster's API server.

* `etag` -
  Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.

* `reconciling` -
  Output only. If set, there are currently changes in flight to the cluster.

* `state` -
  Output only. The current state of the cluster. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED

* `uid` -
  Output only. A globally unique identifier for the cluster.

* `update_time` -
  Output only. The time at which this cluster was last updated.

* `workload_identity_config` -
  Output only. Workload Identity settings.

## Timeouts

This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:

- `create` - Default is 10 minutes.
- `update` - Default is 10 minutes.
- `delete` - Default is 10 minutes.

## Import

Cluster can be imported using any of these accepted formats:

```
$ terraform import google_container_azure_cluster.default projects/{{project}}/locations/{{location}}/azureClusters/{{name}}
$ terraform import google_container_azure_cluster.default {{project}}/{{location}}/{{name}}
$ terraform import google_container_azure_cluster.default {{location}}/{{name}}
```



diff --git a/website/docs/r/container_azure_node_pool.html.markdown b/website/docs/r/container_azure_node_pool.html.markdown
new file mode 100644
index 00000000000..2cc61363f81
--- /dev/null
+++ b/website/docs/r/container_azure_node_pool.html.markdown
@@ -0,0 +1,269 @@
+---
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** Type: DCL ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules)
+# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library).
+# Changes will need to be made to the DCL or Magic Modules instead of here.
+# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "ContainerAzure" +layout: "google" +page_title: "Google: google_container_azure_node_pool" +description: |- +An Anthos node pool running on Azure. +--- + +# google_container_azure_node_pool + +An Anthos node pool running on Azure. + +For more information, see: +* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +## Example Usage - basic_azure_node_pool +A basic example of a containerazure azure node pool +```hcl +data "google_container_azure_versions" "versions" { + project = "my-project-name" + location = "us-west1" +} + +resource "google_container_azure_cluster" "primary" { + authorization { + admin_users { + username = "mmv2@google.com" + } + } + + azure_region = "westus2" + client = "projects/my-project-number/locations/us-west1/azureClients/${google_container_azure_client.basic.name}" + + control_plane { + ssh_config { + authorized_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + } + + fleet { + project = "my-project-number" + } + + location = "us-west1" + name = "name" + + networking { + pod_address_cidr_blocks = ["10.200.0.0/16"] + service_address_cidr_blocks = ["10.32.0.0/24"] + virtual_network_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet" + } + + resource_group_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-cluster" + project = "my-project-name" +} + +resource "google_container_azure_client" "basic" { + application_id = "12345678-1234-1234-1234-123456789111" + location = "us-west1" + name = "client-name" + tenant_id = "12345678-1234-1234-1234-123456789111" + project = "my-project-name" +} + +resource "google_container_azure_node_pool" "primary" { + autoscaling { + max_node_count = 3 + min_node_count = 2 + } + + cluster = google_container_azure_cluster.primary.name + + config { + ssh_config { + authorized_key = "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8yaayO6lnb2v+SedxUMa2c8vtIEzCzBjM3EJJsv8Vm9zUDWR7dXWKoNGARUb2mNGXASvI6mFIDXTIlkQ0poDEPpMaXR0g2cb5xT8jAAJq7fqXL3+0rcJhY/uigQ+MrT6s+ub0BFVbsmGHNrMQttXX9gtmwkeAEvj3mra9e5pkNf90qlKnZz6U0SVArxVsLx07vHPHDIYrl0OPG4zUREF52igbBPiNrHJFDQJT/4YlDMJmo/QT/A1D6n9ocemvZSzhRx15/Arjowhr+VVKSbaxzPtEfY0oIg2SrqJnnr/l3Du5qIefwh5VmCZe4xopPUaDDoOIEFriZ88sB+3zz8ib8sk8zJJQCgeP78tQvXCgS+4e5W3TUg9mxjB6KjXTyHIVhDZqhqde0OI3Fy1UuVzRUwnBaLjBnAwP5EoFQGRmDYk/rEYe7HTmovLeEBUDQocBQKT4Ripm/xJkkWY7B07K/tfo56dGUCkvyIVXKBInCh+dLK7gZapnd4UWkY0xBYcwo1geMLRq58iFTLA2j/JmpmHXp7m0l7jJii7d44uD3tTIFYThn7NlOnvhLim/YcBK07GMGIN7XwrrKZKmxXaspw6KBWVhzuw1UPxctxshYEaMLfFg/bwOw8HvMPr9VtrElpSB7oiOh91PDIPdPBgHCi7N2QgQ5l/ZDBHieSpNrQ== thomasrodgers" + } + + root_volume { + size_gib = 32 + } + + tags = { + owner = "mmv2" + } + + vm_size = "Standard_DS2_v2" + } + + location = "us-west1" + + max_pods_constraint { + max_pods_per_node = 110 + } + + name = "node-pool-name" + subnet_id = "/subscriptions/12345678-1234-1234-1234-123456789111/resourceGroups/my--dev-byo/providers/Microsoft.Network/virtualNetworks/my--dev-vnet/subnets/default" + version = "${data.google_container_azure_versions.versions.valid_versions[0]}" + + annotations = { + annotation-one = "value-one" + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `autoscaling` - + (Required) + Required. Autoscaler configuration for this node pool. + +* `cluster` - + (Required) + The azureCluster for the resource + +* `config` - + (Required) + Required. The node configuration of the node pool. + +* `location` - + (Required) + The location for the resource + +* `max_pods_constraint` - + (Required) + Required. The constraint on the maximum number of pods that can be run simultaneously on a node in the node pool. + +* `name` - + (Required) + The name of this resource. + +* `subnet_id` - + (Required) + Required. The ARM ID of the subnet where the node pool VMs run. Make sure it's a subnet under the virtual network in the cluster configuration. + +* `version` - + (Required) + Required. The Kubernetes version (e.g. `1.19.10-gke.1000`) running on this node pool. + + + +The `autoscaling` block supports: + +* `max_node_count` - + (Required) + Required. Maximum number of nodes in the node pool. Must be >= min_node_count. + +* `min_node_count` - + (Required) + Required. Minimum number of nodes in the node pool. Must be >= 1 and <= max_node_count. + +The `config` block supports: + +* `root_volume` - + (Optional) + Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk. + +* `ssh_config` - + (Required) + Required. SSH configuration for how to access the node pool machines. + +* `tags` - + (Optional) + Optional. A set of tags to apply to all underlying Azure resources for this node pool. This currently only includes Virtual Machine Scale Sets. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters. + +* `vm_size` - + (Optional) + Optional. The Azure VM size name. Example: `Standard_DS2_v2`. See (/anthos/clusters/docs/azure/reference/supported-vms) for options. When unspecified, it defaults to `Standard_DS2_v2`. + +The `ssh_config` block supports: + +* `authorized_key` - + (Required) + Required. The SSH public key data for VMs managed by Anthos. 
This accepts the authorized_keys file format used in OpenSSH according to the sshd(8) manual page. + +The `max_pods_constraint` block supports: + +* `max_pods_per_node` - + (Required) + Required. The maximum number of pods to schedule on a single node. + +- - - + +* `annotations` - + (Optional) + Optional. Annotations on the node pool. This field has the same restrictions as Kubernetes annotations. The total size of all keys and values combined is limited to 256k. Keys can have 2 segments: prefix (optional) and name (required), separated by a slash (/). Prefix must be a DNS subdomain. Name must be 63 characters or less, begin and end with alphanumerics, with dashes (-), underscores (_), dots (.), and alphanumerics between. + +* `azure_availability_zone` - + (Optional) + Optional. The Azure availability zone of the nodes in this nodepool. When unspecified, it defaults to `1`. + +* `project` - + (Optional) + The project for the resource + + + +The `root_volume` block supports: + +* `size_gib` - + (Optional) + Optional. The size of the disk, in GiBs. When unspecified, a default value is provided. See the specific reference in the parent resource. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}}` + +* `create_time` - + Output only. The time at which this node pool was created. + +* `etag` - + Allows clients to perform consistent read-modify-writes through optimistic concurrency control. May be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + +* `reconciling` - + Output only. If set, there are currently pending changes to the node pool. + +* `state` - + Output only. The current state of the node pool. Possible values: STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, DEGRADED + +* `uid` - + Output only. A globally unique identifier for the node pool. + +* `update_time` - + Output only. The time at which this node pool was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +NodePool can be imported using any of these accepted formats: + +``` +$ terraform import google_container_azure_node_pool.default projects/{{project}}/locations/{{location}}/azureClusters/{{cluster}}/azureNodePools/{{name}} +$ terraform import google_container_azure_node_pool.default {{project}}/{{location}}/{{cluster}}/{{name}} +$ terraform import google_container_azure_node_pool.default {{location}}/{{cluster}}/{{name}} +``` + + + diff --git a/website/docs/r/eventarc_trigger.html.markdown b/website/docs/r/eventarc_trigger.html.markdown new file mode 100644 index 00000000000..a13eb55fdf7 --- /dev/null +++ b/website/docs/r/eventarc_trigger.html.markdown @@ -0,0 +1,206 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). 
+
# Changes will need to be made to the DCL or Magic Modules instead of here.
#
# We are not currently able to accept contributions to this file. If changes
# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose
#
# ----------------------------------------------------------------------------
subcategory: "Eventarc"
layout: "google"
page_title: "Google: google_eventarc_trigger"
description: |-
The Eventarc Trigger resource
---

# google_eventarc_trigger

The Eventarc Trigger resource

## Example Usage - basic
```hcl
resource "google_eventarc_trigger" "primary" {
  name     = "name"
  location = "europe-west1"
  matching_criteria {
    attribute = "type"
    value     = "google.cloud.pubsub.topic.v1.messagePublished"
  }
  destination {
    cloud_run_service {
      service = google_cloud_run_service.default.name
      region  = "europe-west1"
    }
  }
  labels = {
    foo = "bar"
  }
}

resource "google_pubsub_topic" "foo" {
  name = "topic"
}

resource "google_cloud_run_service" "default" {
  name     = "eventarc-service"
  location = "europe-west1"

  metadata {
    namespace = "my-project-name"
  }

  template {
    spec {
      containers {
        image = "gcr.io/cloudrun/hello"
        args  = ["arrgs"]
      }
      container_concurrency = 50
    }
  }

  traffic {
    percent         = 100
    latest_revision = true
  }
}

```

## Argument Reference

The following arguments are supported:

* `destination` -
  (Required)
  Required. Destination specifies where the events should be sent to.

* `location` -
  (Required)
  The location for the resource

* `matching_criteria` -
  (Required)
  Required. The list of filters that apply to event attributes. Only events that match all the provided filters will be sent to the destination.

* `name` -
  (Required)
  Required. The resource name of the trigger. Must be unique within the location on the project and must be in `projects/{project}/locations/{location}/triggers/{trigger}` format.



The `destination` block supports:

* `cloud_function` -
  (Optional)
  The Cloud Function resource name. Only Cloud Functions V2 is supported. Format: projects/{project}/locations/{location}/functions/{function}

* `cloud_run_service` -
  (Optional)
  Cloud Run fully-managed service that receives the events. The service should be running in the same project as the trigger.

The `matching_criteria` block supports:

* `attribute` -
  (Required)
  Required. The name of a CloudEvents attribute. Currently, only a subset of attributes are supported for filtering. All triggers MUST provide a filter for the 'type' attribute.

* `value` -
  (Required)
  Required. The value for the attribute.

- - -

* `labels` -
  (Optional)
  Optional. User labels attached to the triggers that can be used to group resources.

* `project` -
  (Optional)
  The project for the resource

* `service_account` -
  (Optional)
  Optional. The IAM service account email associated with the trigger. The service account represents the identity of the trigger. The principal who calls this API must have `iam.serviceAccounts.actAs` permission in the service account. See https://cloud.google.com/iam/docs/understanding-service-accounts?hl=en#sa_common for more information. For Cloud Run destinations, this service account is used to generate identity tokens when invoking the service. See https://cloud.google.com/run/docs/triggering/pubsub-push#create-service-account for information on how to invoke authenticated Cloud Run services.
In order to create Audit Log triggers, the service account should also have the `roles/eventarc.eventReceiver` IAM role (see the example after the Timeouts section below).

* `transport` -
  (Optional)
  Optional. In order to deliver messages, Eventarc may use other GCP products as a transport intermediary. This field contains a reference to that transport intermediary. This information can be used for debugging purposes.



The `cloud_run_service` block supports:

* `path` -
  (Optional)
  Optional. The relative path on the Cloud Run service the events should be sent to. The value must conform to the definition of URI path segment (section 3.3 of RFC2396). Examples: "/route", "route", "route/subroute".

* `region` -
  (Optional)
  Required. The region the Cloud Run service is deployed in.

* `service` -
  (Required)
  Required. The name of the Cloud Run service being addressed. See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services. Only services located in the same project as the trigger object can be addressed.

The `transport` block supports:

* `pubsub` -
  (Optional)
  The Pub/Sub topic and subscription used by Eventarc as delivery intermediary.

The `pubsub` block supports:

* `subscription` -
  Output only. The name of the Pub/Sub subscription created and managed by Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}`.

* `topic` -
  (Optional)
  Optional. The name of the Pub/Sub topic created and managed by Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME}`. You may set an existing topic for triggers of the type `google.cloud.pubsub.topic.v1.messagePublished` only. The topic you provide here will not be deleted by Eventarc at trigger deletion.

## Attributes Reference

In addition to the arguments listed above, the following computed attributes are exported:

* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/triggers/{{name}}`

* `create_time` -
  Output only. The creation time.

* `etag` -
  Output only. This checksum is computed by the server based on the value of other fields, and may be sent only on create requests to ensure the client has an up-to-date value before proceeding.

* `uid` -
  Output only. Server assigned unique identifier for the trigger. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted.

* `update_time` -
  Output only. The last-modified time.

## Timeouts

This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:

- `create` - Default is 10 minutes.
- `update` - Default is 10 minutes.
- `delete` - Default is 10 minutes.
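As noted for `service_account` above, Audit Log triggers require the trigger's service account to hold `roles/eventarc.eventReceiver`. A minimal sketch of granting that role, assuming a hypothetical service account `eventarc-sa` in the example project `my-project-name`:

```hcl
resource "google_service_account" "eventarc" {
  account_id   = "eventarc-sa" # hypothetical account name
  display_name = "Eventarc trigger identity"
}

# Grant the role needed to receive Audit Log events.
resource "google_project_iam_member" "event_receiver" {
  project = "my-project-name" # assumed project ID
  role    = "roles/eventarc.eventReceiver"
  member  = "serviceAccount:${google_service_account.eventarc.email}"
}
```

The trigger would then reference this account through its `service_account` argument.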
+ +## Import + +Trigger can be imported using any of these accepted formats: + +``` +$ terraform import google_eventarc_trigger.default projects/{{project}}/locations/{{location}}/triggers/{{name}} +$ terraform import google_eventarc_trigger.default {{project}}/{{location}}/{{name}} +$ terraform import google_eventarc_trigger.default {{location}}/{{name}} +``` + + + diff --git a/website/docs/r/monitoring_monitored_project.html.markdown b/website/docs/r/monitoring_monitored_project.html.markdown new file mode 100644 index 00000000000..22f4109f60a --- /dev/null +++ b/website/docs/r/monitoring_monitored_project.html.markdown @@ -0,0 +1,92 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud (Stackdriver) Monitoring" +layout: "google" +page_title: "Google: google_monitoring_monitored_project" +description: |- +Beta only: Monitored Project allows you to set a project as monitored by a _metrics scope_, which is a term for a project used to group the metrics of multiple projects, potentially across multiple organizations. This enables you to view these groups in the Monitoring page of the cloud console. +--- + +# google_monitoring_monitored_project + +Beta only: Monitored Project allows you to set a project as monitored by a _metrics scope_, which is a term for a project used to group the metrics of multiple projects, potentially across multiple organizations. This enables you to view these groups in the Monitoring page of the cloud console. + +For more information, see: +* [Understanding metrics scopes](https://cloud.google.com/monitoring/settings#concept-scope) +* [API notes](https://cloud.google.com/monitoring/settings/manage-api) +## Example Usage - basic_monitored_project +A basic example of a monitoring monitored project +```hcl +resource "google_monitoring_monitored_project" "primary" { + metrics_scope = "existing-metrics-scope-project" + name = google_project.basic.name + provider = google-beta +} +resource "google_project" "basic" { + project_id = "my-monitored-project" + name = "my-monitored-project" + org_id = "123456789" + provider = google-beta +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `metrics_scope` - + (Required) + Required. The resource name of the existing Metrics Scope that will monitor this project. Example: locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER} + +* `name` - + (Required) + Immutable. The resource name of the `MonitoredProject`. On input, the resource name includes the scoping project ID and monitored project ID. On output, it contains the equivalent project numbers. 
Example: `locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}` + + + +- - - + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}}` + +* `create_time` - + Output only. The time when this `MonitoredProject` was created. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +MonitoredProject can be imported using any of these accepted formats: + +``` +$ terraform import google_monitoring_monitored_project.default locations/global/metricsScopes/{{metrics_scope}}/projects/{{name}} +$ terraform import google_monitoring_monitored_project.default {{metrics_scope}}/{{name}} +``` + + + diff --git a/website/docs/r/network_connectivity_hub.html.markdown b/website/docs/r/network_connectivity_hub.html.markdown new file mode 100644 index 00000000000..f0f666b6a8e --- /dev/null +++ b/website/docs/r/network_connectivity_hub.html.markdown @@ -0,0 +1,111 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "NetworkConnectivity" +layout: "google" +page_title: "Google: google_network_connectivity_hub" +description: |- +The NetworkConnectivity Hub resource +--- + +# google_network_connectivity_hub + +The NetworkConnectivity Hub resource + +## Example Usage - basic_hub +A basic test of a networkconnectivity hub +```hcl +resource "google_network_connectivity_hub" "primary" { + name = "hub" + description = "A sample hub" + + labels = { + label-one = "value-one" + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + Immutable. The name of the hub. Hub names must be unique. They use the following form: `projects/{project_number}/locations/global/hubs/{hub_id}` + + + +- - - + +* `description` - + (Optional) + An optional description of the hub. + +* `labels` - + (Optional) + Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements). + +* `project` - + (Optional) + The project for the resource + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/hubs/{{name}}` + +* `create_time` - + Output only. The time the hub was created. + +* `routing_vpcs` - + The VPC network associated with this hub's spokes. 
All of the VPN tunnels, VLAN attachments, and router appliance instances referenced by this hub's spokes must belong to this VPC network. This field is read-only. Network Connectivity Center automatically populates it based on the set of spokes attached to the hub. + +* `state` - + Output only. The current lifecycle state of this hub. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING + +* `unique_id` - + Output only. The Google-generated UUID for the hub. This value is unique across all hub resources. If a hub is deleted and another with the same name is created, the new hub is assigned a different unique_id. + +* `update_time` - + Output only. The time the hub was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +Hub can be imported using any of these accepted formats: + +``` +$ terraform import google_network_connectivity_hub.default projects/{{project}}/locations/global/hubs/{{name}} +$ terraform import google_network_connectivity_hub.default {{project}}/{{name}} +$ terraform import google_network_connectivity_hub.default {{name}} +``` + + + diff --git a/website/docs/r/network_connectivity_spoke.html.markdown b/website/docs/r/network_connectivity_spoke.html.markdown new file mode 100644 index 00000000000..05cd2c2f774 --- /dev/null +++ b/website/docs/r/network_connectivity_spoke.html.markdown @@ -0,0 +1,213 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. 
If changes
# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose
#
# ----------------------------------------------------------------------------
subcategory: "NetworkConnectivity"
layout: "google"
page_title: "Google: google_network_connectivity_spoke"
description: |-
The NetworkConnectivity Spoke resource
---

# google_network_connectivity_spoke

The NetworkConnectivity Spoke resource

## Example Usage - router_appliance
```hcl

resource "google_compute_network" "network" {
  name                    = "network"
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "subnetwork" {
  name          = "subnet"
  ip_cidr_range = "10.0.0.0/28"
  region        = "us-west1"
  network       = google_compute_network.network.self_link
}

resource "google_compute_instance" "instance" {
  name           = "instance"
  machine_type   = "e2-medium"
  can_ip_forward = true
  zone           = "us-west1-a"

  boot_disk {
    initialize_params {
      image = "projects/debian-cloud/global/images/debian-10-buster-v20210817"
    }
  }

  network_interface {
    subnetwork = google_compute_subnetwork.subnetwork.name
    network_ip = "10.0.0.2"
    access_config {
      network_tier = "PREMIUM"
    }
  }
}

resource "google_network_connectivity_hub" "basic_hub" {
  name        = "hub"
  description = "A sample hub"
  labels = {
    label-two = "value-one"
  }
}

resource "google_network_connectivity_spoke" "primary" {
  name        = "name"
  location    = "us-west1"
  description = "A sample spoke with a linked router appliance instance"
  labels = {
    label-one = "value-one"
  }
  hub = google_network_connectivity_hub.basic_hub.id
  linked_router_appliance_instances {
    instances {
      virtual_machine = google_compute_instance.instance.self_link
      ip_address      = "10.0.0.2"
    }
    site_to_site_data_transfer = true
  }
}
```

## Argument Reference

The following arguments are supported:

* `hub` -
  (Required)
  Immutable. The URI of the hub that this spoke is attached to.

* `location` -
  (Required)
  The location for the resource

* `name` -
  (Required)
  Immutable. The name of the spoke. Spoke names must be unique.



The `instances` block supports:

* `ip_address` -
  (Optional)
  The IP address on the VM to use for peering.

* `virtual_machine` -
  (Optional)
  The URI of the virtual machine resource

- - -

* `description` -
  (Optional)
  An optional description of the spoke.

* `labels` -
  (Optional)
  Optional labels in key:value format. For more information about labels, see [Requirements for labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements).

* `linked_interconnect_attachments` -
  (Optional)
  A collection of VLAN attachment resources. These resources should be redundant attachments that all advertise the same prefixes to Google Cloud. Alternatively, in active/passive configurations, all attachments should be capable of advertising the same prefixes.

* `linked_router_appliance_instances` -
  (Optional)
  The URIs of linked Router appliance resources

* `linked_vpn_tunnels` -
  (Optional)
  The URIs of linked VPN tunnel resources

* `project` -
  (Optional)
  The project for the resource



The `linked_interconnect_attachments` block supports:

* `site_to_site_data_transfer` -
  (Required)
  A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations.
+ +* `uris` - + (Required) + The URIs of linked interconnect attachment resources + +The `linked_router_appliance_instances` block supports: + +* `instances` - + (Required) + The list of router appliance instances + +* `site_to_site_data_transfer` - + (Required) + A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. + +The `linked_vpn_tunnels` block supports: + +* `site_to_site_data_transfer` - + (Required) + A value that controls whether site-to-site data transfer is enabled for these resources. Note that data transfer is available only in supported locations. + +* `uris` - + (Required) + The URIs of linked VPN tunnel resources. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/spokes/{{name}}` + +* `create_time` - + Output only. The time the spoke was created. + +* `state` - + Output only. The current lifecycle state of this spoke. Possible values: STATE_UNSPECIFIED, CREATING, ACTIVE, DELETING + +* `unique_id` - + Output only. The Google-generated UUID for the spoke. This value is unique across all spoke resources. If a spoke is deleted and another with the same name is created, the new spoke is assigned a different unique_id. + +* `update_time` - + Output only. The time the spoke was last updated. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +Spoke can be imported using any of these accepted formats: + +``` +$ terraform import google_network_connectivity_spoke.default projects/{{project}}/locations/{{location}}/spokes/{{name}} +$ terraform import google_network_connectivity_spoke.default {{project}}/{{location}}/{{name}} +$ terraform import google_network_connectivity_spoke.default {{location}}/{{name}} +``` + + + diff --git a/website/docs/r/org_policy_policy.html.markdown b/website/docs/r/org_policy_policy.html.markdown new file mode 100644 index 00000000000..c3db254c68a --- /dev/null +++ b/website/docs/r/org_policy_policy.html.markdown @@ -0,0 +1,244 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "OrgPolicy" +layout: "google" +page_title: "Google: google_org_policy_policy" +description: |- +An organization policy gives you programmatic control over your organization's cloud resources. Using Organization Policies, you will be able to configure constraints across your entire resource hierarchy. 
+--- + +# google_org_policy_policy + +An organization policy gives you programmatic control over your organization's cloud resources. Using Organization Policies, you will be able to configure constraints across your entire resource hierarchy. + +For more information, see: +* [Understanding Org Policy concepts](https://cloud.google.com/resource-manager/docs/organization-policy/overview) +* [The resource hierarchy](https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy) +* [All valid constraints](https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints) +## Example Usage - enforce_policy +A test of an enforce orgpolicy policy for a project +```hcl +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/iam.disableServiceAccountKeyUpload" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + enforce = "FALSE" + } + } +} + +resource "google_project" "basic" { + project_id = "id" + name = "id" + org_id = "123456789" +} + + +``` +## Example Usage - folder_policy +A test of an orgpolicy policy for a folder +```hcl +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = true + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/123456789" + display_name = "folder" +} + + +``` +## Example Usage - organization_policy +A test of an orgpolicy policy for an organization +```hcl +resource "google_org_policy_policy" "primary" { + name = "organizations/123456789/policies/gcp.detailedAuditLoggingMode" + parent = "organizations/123456789" + + spec { + reset = true + } +} + + +``` +## Example Usage - project_policy +A test of an orgpolicy policy for a project +```hcl +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + location = "sample-location.log" + title = "sample-condition" + } + + values { + allowed_values = ["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "id" + name = "id" + org_id = "123456789" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - + (Required) + Immutable. The resource name of the Policy. Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, "projects/123/policies/compute.disableSerialPortAccess". Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number. + +* `parent` - + (Required) + The parent of the resource. + + + +- - - + +* `spec` - + (Optional) + Basic information about the Organization Policy. + + + +The `spec` block supports: + +* `etag` - + An opaque tag indicating the current version of the `Policy`, used for concurrency control. 
This field is ignored if used in a `CreatePolicy` request. When the `Policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current `Policy` to use when executing a read-modify-write loop. When the `Policy` is returned from a `GetEffectivePolicy` request, the `etag` will be unset.

* `inherit_from_parent` -
  (Optional)
  Determines the inheritance behavior for this `Policy`. If `inherit_from_parent` is true, PolicyRules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this Policy becomes the new root for evaluation. This field can be set only for Policies which configure list constraints.

* `reset` -
  (Optional)
  Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific `Constraint` at this resource. This field can be set in policies for either list or boolean constraints. If set, `rules` must be empty and `inherit_from_parent` must be set to false.

* `rules` -
  (Optional)
  Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set `enforce` to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.

* `update_time` -
  Output only. The timestamp of when this `Policy` was last updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that `Policy`.

The `rules` block supports:

* `allow_all` -
  (Optional)
  Setting this to true means that all values are allowed. This field can be set only in Policies for list constraints.

* `condition` -
  (Optional)
  A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr` must include from 1 to 10 subexpressions, joined by the "||" or "&&" operators. Each subexpression must be of the form "resource.matchTag('/tag_key_short_name', 'tag_value_short_name')" or "resource.matchTagId('tagKeys/key_id', 'tagValues/value_id')", where key_name and value_name are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: "resource.matchTag('123456789/environment', 'prod')" or "resource.matchTagId('tagKeys/123', 'tagValues/456')".

* `deny_all` -
  (Optional)
  Setting this to true means that all values are denied. This field can be set only in Policies for list constraints.

* `enforce` -
  (Optional)
  If `true`, then the `Policy` is enforced. If `false`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints.

* `values` -
  (Optional)
  List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints.

The `condition` block supports:

* `description` -
  (Optional)
  Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.

* `expression` -
  (Optional)
  Textual representation of an expression in Common Expression Language syntax.

* `location` -
  (Optional)
  Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
+ +* `title` - + (Optional) + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + +The `values` block supports: + +* `allowed_values` - + (Optional) + List of values allowed at this resource. + +* `denied_values` - + (Optional) + List of values denied at this resource. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `{{parent}}/policies/{{name}}` + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +Policy can be imported using any of these accepted formats: + +``` +$ terraform import google_org_policy_policy.default {{parent}}/policies/{{name}} +$ terraform import google_org_policy_policy.default {{parent}}/{{name}} +``` + + + diff --git a/website/docs/r/os_config_os_policy_assignment.html.markdown b/website/docs/r/os_config_os_policy_assignment.html.markdown new file mode 100644 index 00000000000..ed5f4676f79 --- /dev/null +++ b/website/docs/r/os_config_os_policy_assignment.html.markdown @@ -0,0 +1,1300 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "OsConfig" +layout: "google" +page_title: "Google: google_os_config_os_policy_assignment" +description: |- +Represents an OSPolicyAssignment resource. +--- + +# google_os_config_os_policy_assignment + +Represents an OSPolicyAssignment resource. 
+ +## Example Usage - fixed_os_policy_assignment +An example of an osconfig os policy assignment with fixed rollout disruption budget +```hcl +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = false + + exclusion_labels { + labels = { + label-two = "value-two" + } + } + + inclusion_labels { + labels = { + label-one = "value-one" + } + } + + inventories { + os_short_name = "centos" + os_version = "8.*" + } + } + + location = "us-west1-a" + name = "assignment" + + os_policies { + id = "policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt" + + pkg { + desired_state = "INSTALLED" + + apt { + name = "bazel" + } + } + } + + resources { + id = "deb1" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + local_path = "$HOME/package.deb" + } + } + } + } + + resources { + id = "deb2" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + allow_insecure = true + + remote { + uri = "ftp.us.debian.org/debian/package.deb" + sha256_checksum = "3bbfd1043cd7afdb78cf9afec36c0c5370d2fea98166537b4e67f3816f256025" + } + } + + pull_deps = true + } + } + } + + resources { + id = "deb3" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + pull_deps = true + } + } + } + + resources { + id = "yum" + + pkg { + desired_state = "INSTALLED" + + yum { + name = "gstreamer-plugins-base-devel.x86_64" + } + } + } + + resources { + id = "zypper" + + pkg { + desired_state = "INSTALLED" + + zypper { + name = "gcc" + } + } + } + + resources { + id = "rpm1" + + pkg { + desired_state = "INSTALLED" + + rpm { + source { + local_path = "$HOME/package.rpm" + } + + pull_deps = true + } + } + } + + resources { + id = "rpm2" + + pkg { + desired_state = "INSTALLED" + + rpm { + source { + allow_insecure = true + + remote { + uri = "https://mirror.jaleco.com/centos/8.3.2011/BaseOS/x86_64/os/Packages/efi-filesystem-3-2.el8.noarch.rpm" + sha256_checksum = "3bbfd1043cd7afdb78cf9afec36c0c5370d2fea98166537b4e67f3816f256025" + } + } + } + } + } + + resources { + id = "rpm3" + + pkg { + desired_state = "INSTALLED" + + rpm { + source { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + } + + inventory_filters { + os_short_name = "centos" + os_version = "8.*" + } + } + + resource_groups { + resources { + id = "apt-to-deb" + + pkg { + desired_state = "INSTALLED" + + apt { + name = "bazel" + } + } + } + + resources { + id = "deb-local-path-to-gcs" + + pkg { + desired_state = "INSTALLED" + + deb { + source { + local_path = "$HOME/package.deb" + } + } + } + } + + resources { + id = "googet" + + pkg { + desired_state = "INSTALLED" + + googet { + name = "gcc" + } + } + } + + resources { + id = "msi1" + + pkg { + desired_state = "INSTALLED" + + msi { + source { + local_path = "$HOME/package.msi" + } + + properties = ["REBOOT=ReallySuppress"] + } + } + } + + resources { + id = "msi2" + + pkg { + desired_state = "INSTALLED" + + msi { + source { + allow_insecure = true + + remote { + uri = "https://remote.uri.com/package.msi" + sha256_checksum = "3bbfd1043cd7afdb78cf9afec36c0c5370d2fea98166537b4e67f3816f256025" + } + } + } + } + } + + resources { + id = "msi3" + + pkg { + desired_state = "INSTALLED" + + msi { + source { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + } + } + + allow_no_resource_group_match = false + description = "A test os policy" + } + + rollout { + disruption_budget { + fixed = 1 
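      # `fixed` caps the rollout at an absolute number of disrupted VMs per zone;
      # the percent_os_policy_assignment example below shows the `percent` alternative.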
+ } + + min_wait_duration = "3.5s" + } + + description = "A test os policy assignment" + project = "my-project-name" +} + + +``` +## Example Usage - percent_os_policy_assignment +An example of an osconfig os policy assignment with percent rollout disruption budget +```hcl +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = true + } + + location = "us-west1-a" + name = "assignment" + + os_policies { + id = "policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt-to-yum" + + repository { + apt { + archive_type = "DEB" + components = ["doc"] + distribution = "debian" + uri = "https://atl.mirrors.clouvider.net/debian" + gpg_key = ".gnupg/pubring.kbx" + } + } + } + + resources { + id = "yum" + + repository { + yum { + base_url = "http://centos.s.uw.edu/centos/" + id = "yum" + display_name = "yum" + gpg_keys = ["RPM-GPG-KEY-CentOS-7"] + } + } + } + + resources { + id = "zypper" + + repository { + zypper { + base_url = "http://mirror.dal10.us.leaseweb.net/opensuse" + id = "zypper" + display_name = "zypper" + gpg_keys = ["sample-key-uri"] + } + } + } + + resources { + id = "goo" + + repository { + goo { + name = "goo" + url = "https://foo.com/googet/bar" + } + } + } + + resources { + id = "exec1" + + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + + file { + local_path = "$HOME/script.sh" + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + args = ["arg1"] + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "exec2" + + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + args = ["arg1"] + + file { + local_path = "$HOME/script.sh" + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "exec3" + + exec { + validate { + interpreter = "SHELL" + + file { + allow_insecure = true + + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + output_file_path = "$HOME/out" + } + + enforce { + interpreter = "SHELL" + output_file_path = "$HOME/out" + script = "pwd" + } + } + } + + resources { + id = "exec4" + + exec { + validate { + interpreter = "SHELL" + output_file_path = "$HOME/out" + script = "pwd" + } + + enforce { + interpreter = "SHELL" + + file { + allow_insecure = true + + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + + output_file_path = "$HOME/out" + } + } + } + + resources { + id = "file1" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + local_path = "$HOME/file" + } + } + } + } + + resource_groups { + resources { + id = "file2" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + allow_insecure = true + + remote { + uri = "https://www.example.com/file" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + } + } + + resources { + id = "file3" + + file { + path = "$HOME/file" + state = "PRESENT" + + file { + gcs { + bucket = "test-bucket" + object = "test-object" + generation = 1 + } + } + } + } + + resources { + id = "file4" + + file { + path = "$HOME/file" + state = 
"PRESENT" + content = "sample-content" + } + } + } + } + + rollout { + disruption_budget { + percent = 1 + } + + min_wait_duration = "3.5s" + } + + description = "A test os policy assignment" + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `instance_filter` - + (Required) + Required. Filter to select VMs. + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + Resource name. + +* `os_policies` - + (Required) + Required. List of OS policies to be applied to the VMs. + +* `rollout` - + (Required) + Required. Rollout to deploy the OS policy assignment. A rollout is triggered in the following situations: 1) OSPolicyAssignment is created. 2) OSPolicyAssignment is updated and the update contains changes to one of the following fields: - instance_filter - os_policies 3) OSPolicyAssignment is deleted. + + + +The `instance_filter` block supports: + +* `all` - + (Optional) + Target all VMs in the project. If true, no other criteria is permitted. + +* `exclusion_labels` - + (Optional) + List of label sets used for VM exclusion. If the list has more than one label set, the VM is excluded if any of the label sets are applicable for the VM. + +* `inclusion_labels` - + (Optional) + List of label sets used for VM inclusion. If the list has more than one `LabelSet`, the VM is included if any of the label sets are applicable for the VM. + +* `inventories` - + (Optional) + List of inventories to select VMs. A VM is selected if its inventory data matches at least one of the following inventories. + +The `os_policies` block supports: + +* `allow_no_resource_group_match` - + (Optional) + This flag determines the OS policy compliance status when none of the resource groups within the policy are applicable for a VM. Set this value to `true` if the policy needs to be reported as compliant even if the policy has nothing to validate or enforce. + +* `description` - + (Optional) + Policy description. Length of the description is limited to 1024 characters. + +* `id` - + (Required) + Required. The id of the OS policy with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the assignment. + +* `mode` - + (Required) + Required. Policy mode Possible values: MODE_UNSPECIFIED, VALIDATION, ENFORCEMENT + +* `resource_groups` - + (Required) + Required. List of resource groups for the policy. For a particular VM, resource groups are evaluated in the order specified and the first resource group that is applicable is selected and the rest are ignored. If none of the resource groups are applicable for a VM, the VM is considered to be non-compliant w.r.t this policy. This behavior can be toggled by the flag `allow_no_resource_group_match` + +The `resource_groups` block supports: + +* `inventory_filters` - + (Optional) + List of inventory filters for the resource group. The resources in this resource group are applied to the target VM if it satisfies at least one of the following inventory filters. For example, to apply this resource group to VMs running either `RHEL` or `CentOS` operating systems, specify 2 items for the list with following values: inventory_filters[0].os_short_name='rhel' and inventory_filters[1].os_short_name='centos' If the list is empty, this resource group will be applied to the target VM unconditionally. + +* `resources` - + (Required) + Required. 
List of resources configured for this resource group. The resources are executed in the exact order specified here. + +The `resources` block supports: + +* `exec` - + (Optional) + Exec resource + +* `file` - + (Optional) + File resource + +* `id` - + (Required) + Required. The id of the resource with the following restrictions: * Must contain only lowercase letters, numbers, and hyphens. * Must start with a letter. * Must be between 1-63 characters. * Must end with a number or a letter. * Must be unique within the OS policy. + +* `pkg` - + (Optional) + Package resource + +* `repository` - + (Optional) + Package repository resource + +The `validate` block supports: + +* `args` - + (Optional) + Optional arguments to pass to the source during execution. + +* `file` - + (Optional) + A remote or local file. + +* `interpreter` - + (Required) + Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL + +* `output_file_path` - + (Optional) + Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes. + +* `script` - + (Optional) + An inline script. The size of the script is limited to 1024 characters. + +The `source` block supports: + +* `allow_insecure` - + (Optional) + Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified. + +* `gcs` - + (Optional) + A Cloud Storage object. + +* `local_path` - + (Optional) + A local path within the VM to use. + +* `remote` - + (Optional) + A generic remote file. + +The `source` block supports: + +* `allow_insecure` - + (Optional) + Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified. + +* `gcs` - + (Optional) + A Cloud Storage object. + +* `local_path` - + (Optional) + A local path within the VM to use. + +* `remote` - + (Optional) + A generic remote file. + +The `source` block supports: + +* `allow_insecure` - + (Optional) + Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified. + +* `gcs` - + (Optional) + A Cloud Storage object. + +* `local_path` - + (Optional) + A local path within the VM to use. + +* `remote` - + (Optional) + A generic remote file. + +The `rollout` block supports: + +* `disruption_budget` - + (Required) + Required. The maximum number (or percentage) of VMs per zone to disrupt at any given moment. + +* `min_wait_duration` - + (Required) + Required. This determines the minimum duration of time to wait after the configuration changes are applied through the current rollout. A VM continues to count towards the `disruption_budget` at least until this duration of time has passed after configuration changes are applied. + +The `disruption_budget` block supports: + +* `fixed` - + (Optional) + Specifies a fixed value. + +* `percent` - + (Optional) + Specifies the relative value defined as a percentage, which will be multiplied by a reference value. + +- - - + +* `description` - + (Optional) + OS policy assignment description. 
Length of the description is limited to 1024 characters. + +* `project` - + (Optional) + The project for the resource + + + +The `exclusion_labels` block supports: + +* `labels` - + (Optional) + Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected. + +The `inclusion_labels` block supports: + +* `labels` - + (Optional) + Labels are identified by key/value pairs in this map. A VM should contain all the key/value pairs specified in this map to be selected. + +The `inventories` block supports: + +* `os_short_name` - + (Required) + Required. The OS short name. + +* `os_version` - + (Optional) + The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions. + +The `inventory_filters` block supports: + +* `os_short_name` - + (Required) + Required. The OS short name. + +* `os_version` - + (Optional) + The OS version. Prefix matches are supported if an asterisk (*) is provided as the last character. For example, to match all versions with a major version of `7`, specify `7.*` as the value for this field. An empty string matches all OS versions. + +The `exec` block supports: + +* `enforce` - + (Optional) + What to run to bring this resource into the desired state. An exit code of 100 indicates "success"; any other exit code indicates a failure running enforce. + +* `validate` - + (Required) + Required. What to run to validate this resource is in the desired state. An exit code of 100 indicates "in desired state", and an exit code of 101 indicates "not in desired state". Any other exit code indicates a failure running validate. + +The `enforce` block supports: + +* `args` - + (Optional) + Optional arguments to pass to the source during execution. + +* `file` - + (Optional) + A remote or local file. + +* `interpreter` - + (Required) + Required. The script interpreter to use. Possible values: INTERPRETER_UNSPECIFIED, NONE, SHELL, POWERSHELL + +* `output_file_path` - + (Optional) + Only recorded for enforce Exec. Path to an output file (that is created by this Exec) whose content will be recorded in OSPolicyResourceCompliance after a successful run. Absence or failure to read this file will result in this ExecResource being non-compliant. Output file size is limited to 100K bytes. + +* `script` - + (Optional) + An inline script. The size of the script is limited to 1024 characters. + +The `file` block supports: + +* `allow_insecure` - + (Optional) + Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified. + +* `gcs` - + (Optional) + A Cloud Storage object. + +* `local_path` - + (Optional) + A local path within the VM to use. + +* `remote` - + (Optional) + A generic remote file. + +The `gcs` block supports: + +* `bucket` - + (Required) + Required. Bucket of the Cloud Storage object. + +* `generation` - + (Optional) + Generation number of the Cloud Storage object. + +* `object` - + (Required) + Required. Name of the Cloud Storage object. + +The `remote` block supports: + +* `sha256_checksum` - + (Optional) + SHA256 checksum of the remote file. + +* `uri` - + (Required) + Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`.
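To tie the `exec`, `file`, and `remote` blocks above together, here is a minimal sketch (not one of the generated examples) of an `exec` resource whose validate script is fetched from a remote URI; the URI and checksum are placeholder values, and the checksum is required because `allow_insecure` defaults to false:

```hcl
resources {
  id = "exec-remote-sketch"

  exec {
    validate {
      interpreter = "SHELL"

      file {
        remote {
          # Full protocol and path, per the `uri` format documented above (placeholder URI).
          uri             = "https://example.com/check.sh"

          # Placeholder digest; a checksum is required since allow_insecure defaults to false.
          sha256_checksum = "0000000000000000000000000000000000000000000000000000000000000000"
        }
      }
    }
  }
}
```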
+ +The `file` block supports: + +* `allow_insecure` - + (Optional) + Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified. + +* `gcs` - + (Optional) + A Cloud Storage object. + +* `local_path` - + (Optional) + A local path within the VM to use. + +* `remote` - + (Optional) + A generic remote file. + +The `gcs` block supports: + +* `bucket` - + (Required) + Required. Bucket of the Cloud Storage object. + +* `generation` - + (Optional) + Generation number of the Cloud Storage object. + +* `object` - + (Required) + Required. Name of the Cloud Storage object. + +The `remote` block supports: + +* `sha256_checksum` - + (Optional) + SHA256 checksum of the remote file. + +* `uri` - + (Required) + Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`. + +The `file` block supports: + +* `content` - + (Optional) + A file with this content. The size of the content is limited to 1024 characters. + +* `file` - + (Optional) + A remote or local source. + +* `path` - + (Required) + Required. The absolute path of the file within the VM. + +* `permissions` - + Consists of three octal digits that represent, in order, the permissions of the owner, group, and other users for the file (similar to the numeric mode used in the Linux chmod utility). Each digit is a three-bit number in which the 4 bit corresponds to read permission, the 2 bit to write permission, and the 1 bit to execute permission. The default is 755. Some examples of permissions and their associated values: read, write, and execute: 7; read and execute: 5; read and write: 6; read only: 4. + +* `state` - + (Required) + Required. Desired state of the file. Possible values: DESIRED_STATE_UNSPECIFIED, PRESENT, ABSENT, CONTENTS_MATCH + +The `file` block supports: + +* `allow_insecure` - + (Optional) + Defaults to false. When false, files are subject to validations based on the file type: Remote: A checksum must be specified. Cloud Storage: An object generation number must be specified. + +* `gcs` - + (Optional) + A Cloud Storage object. + +* `local_path` - + (Optional) + A local path within the VM to use. + +* `remote` - + (Optional) + A generic remote file. + +The `gcs` block supports: + +* `bucket` - + (Required) + Required. Bucket of the Cloud Storage object. + +* `generation` - + (Optional) + Generation number of the Cloud Storage object. + +* `object` - + (Required) + Required. Name of the Cloud Storage object. + +The `remote` block supports: + +* `sha256_checksum` - + (Optional) + SHA256 checksum of the remote file. + +* `uri` - + (Required) + Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`. + +The `pkg` block supports: + +* `apt` - + (Optional) + A package managed by Apt. + +* `deb` - + (Optional) + A deb package file. + +* `desired_state` - + (Required) + Required. The desired state the agent should maintain for this package. Possible values: DESIRED_STATE_UNSPECIFIED, INSTALLED, REMOVED + +* `googet` - + (Optional) + A package managed by GooGet. + +* `msi` - + (Optional) + An MSI package. + +* `rpm` - + (Optional) + An rpm package file. + +* `yum` - + (Optional) + A package managed by YUM.
+ +* `zypper` - + (Optional) + A package managed by Zypper. + +The `apt` block supports: + +* `name` - + (Required) + Required. Package name. + +The `deb` block supports: + +* `pull_deps` - + (Optional) + Whether dependencies should also be installed. - install when false: `dpkg -i package` - install when true: `apt-get update && apt-get -y install package.deb` + +* `source` - + (Required) + Required. A deb package. + +The `gcs` block supports: + +* `bucket` - + (Required) + Required. Bucket of the Cloud Storage object. + +* `generation` - + (Optional) + Generation number of the Cloud Storage object. + +* `object` - + (Required) + Required. Name of the Cloud Storage object. + +The `remote` block supports: + +* `sha256_checksum` - + (Optional) + SHA256 checksum of the remote file. + +* `uri` - + (Required) + Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`. + +The `googet` block supports: + +* `name` - + (Required) + Required. Package name. + +The `msi` block supports: + +* `properties` - + (Optional) + Additional properties to use during installation. This should be in the format of Property=Setting. Appended to the defaults of `ACTION=INSTALL REBOOT=ReallySuppress`. + +* `source` - + (Required) + Required. The MSI package. + +The `gcs` block supports: + +* `bucket` - + (Required) + Required. Bucket of the Cloud Storage object. + +* `generation` - + (Optional) + Generation number of the Cloud Storage object. + +* `object` - + (Required) + Required. Name of the Cloud Storage object. + +The `remote` block supports: + +* `sha256_checksum` - + (Optional) + SHA256 checksum of the remote file. + +* `uri` - + (Required) + Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`. + +The `rpm` block supports: + +* `pull_deps` - + (Optional) + Whether dependencies should also be installed. - install when false: `rpm --upgrade --replacepkgs package.rpm` - install when true: `yum -y install package.rpm` or `zypper -y install package.rpm` + +* `source` - + (Required) + Required. An rpm package. + +The `gcs` block supports: + +* `bucket` - + (Required) + Required. Bucket of the Cloud Storage object. + +* `generation` - + (Optional) + Generation number of the Cloud Storage object. + +* `object` - + (Required) + Required. Name of the Cloud Storage object. + +The `remote` block supports: + +* `sha256_checksum` - + (Optional) + SHA256 checksum of the remote file. + +* `uri` - + (Required) + Required. URI from which to fetch the object. It should contain both the protocol and path following the format `{protocol}://{location}`. + +The `yum` block supports: + +* `name` - + (Required) + Required. Package name. + +The `zypper` block supports: + +* `name` - + (Required) + Required. Package name. + +The `repository` block supports: + +* `apt` - + (Optional) + An Apt Repository. + +* `goo` - + (Optional) + A Goo Repository. + +* `yum` - + (Optional) + A Yum Repository. + +* `zypper` - + (Optional) + A Zypper Repository. + +The `apt` block supports: + +* `archive_type` - + (Required) + Required. Type of archive files in this repository. Possible values: ARCHIVE_TYPE_UNSPECIFIED, DEB, DEB_SRC + +* `components` - + (Required) + Required. List of components for this repository. Must contain at least one item. + +* `distribution` - + (Required) + Required. Distribution of this repository. 
+ +* `gpg_key` - + (Optional) + URI of the key file for this repository. The agent maintains a keyring at `/etc/apt/trusted.gpg.d/osconfig_agent_managed.gpg`. + +* `uri` - + (Required) + Required. URI for this repository. + +The `goo` block supports: + +* `name` - + (Required) + Required. The name of the repository. + +* `url` - + (Required) + Required. The URL of the repository. + +The `yum` block supports: + +* `base_url` - + (Required) + Required. The location of the repository directory. + +* `display_name` - + (Optional) + The display name of the repository. + +* `gpg_keys` - + (Optional) + URIs of GPG keys. + +* `id` - + (Required) + Required. A one-word, unique name for this repository. This is the `repo id` in the yum config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for resource conflicts. + +The `zypper` block supports: + +* `base_url` - + (Required) + Required. The location of the repository directory. + +* `display_name` - + (Optional) + The display name of the repository. + +* `gpg_keys` - + (Optional) + URIs of GPG keys. + +* `id` - + (Required) + Required. A one-word, unique name for this repository. This is the `repo id` in the zypper config file and also the `display_name` if `display_name` is omitted. This id is also used as the unique identifier when checking for GuestPolicy conflicts. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}}` + +* `baseline` - + Output only. Indicates that this revision has been successfully rolled out in this zone and new VMs will be assigned OS policies from this revision. For a given OS policy assignment, there is only one revision with a value of `true` for this field. + +* `deleted` - + Output only. Indicates that this revision deletes the OS policy assignment. + +* `etag` - + The etag for this OS policy assignment. If this is provided on update, it must match the server's etag. + +* `reconciling` - + Output only. Indicates that reconciliation is in progress for the revision. This value is `true` when the `rollout_state` is one of: * IN_PROGRESS * CANCELLING + +* `revision_create_time` - + Output only. The timestamp that the revision was created. + +* `revision_id` - + Output only. The assignment revision ID. A new revision is committed whenever a rollout is triggered for an OS policy assignment. + +* `rollout_state` - + Output only. OS policy assignment rollout state. Possible values: ROLLOUT_STATE_UNSPECIFIED, IN_PROGRESS, CANCELLING, CANCELLED, SUCCEEDED + +* `uid` - + Output only. Server-generated unique id for the OS policy assignment resource. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes.
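These defaults can be overridden with a standard Terraform `timeouts` block inside the resource. A minimal sketch (the resource contents mirror the examples above; the names and durations are arbitrary examples):

```hcl
resource "google_os_config_os_policy_assignment" "timeouts_example" {
  location = "us-west1-a"
  name     = "assignment-with-timeouts"

  instance_filter {
    all = true
  }

  os_policies {
    id   = "policy"
    mode = "VALIDATION"

    resource_groups {
      resources {
        id = "exec"

        exec {
          validate {
            interpreter      = "SHELL"
            script           = "pwd"
            output_file_path = "$HOME/out"
          }
        }
      }
    }
  }

  rollout {
    disruption_budget {
      fixed = 1
    }

    min_wait_duration = "3.5s"
  }

  # Override the default 10-minute operation timeouts.
  timeouts {
    create = "20m"
    update = "20m"
    delete = "20m"
  }
}
```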
+ +## Import + +OSPolicyAssignment can be imported using any of these accepted formats: + +``` +$ terraform import google_os_config_os_policy_assignment.default projects/{{project}}/locations/{{location}}/osPolicyAssignments/{{name}} +$ terraform import google_os_config_os_policy_assignment.default {{project}}/{{location}}/{{name}} +$ terraform import google_os_config_os_policy_assignment.default {{location}}/{{name}} +``` + + + diff --git a/website/docs/r/privateca_certificate_template.html.markdown b/website/docs/r/privateca_certificate_template.html.markdown new file mode 100644 index 00000000000..497fc02bc45 --- /dev/null +++ b/website/docs/r/privateca_certificate_template.html.markdown @@ -0,0 +1,381 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "Privateca" +layout: "google" +page_title: "Google: google_privateca_certificate_template" +description: |- +Certificate Authority Service provides reusable and parameterized templates that you can use for common certificate issuance scenarios. A certificate template represents a relatively static and well-defined certificate issuance schema within an organization. A certificate template can essentially become a full-fledged vertical certificate issuance framework. +--- + +# google_privateca_certificate_template + +Certificate Authority Service provides reusable and parameterized templates that you can use for common certificate issuance scenarios. A certificate template represents a relatively static and well-defined certificate issuance schema within an organization. A certificate template can essentially become a full-fledged vertical certificate issuance framework. 
+ +For more information, see: +* [Understanding Certificate Templates](https://cloud.google.com/certificate-authority-service/docs/certificate-template) +* [Common configurations and Certificate Profiles](https://cloud.google.com/certificate-authority-service/docs/certificate-profile) +## Example Usage - basic_certificate_template +An example of a basic privateca certificate template +```hcl +resource "google_privateca_certificate_template" "primary" { + location = "us-west1" + name = "template" + description = "An updated sample certificate template" + + identity_constraints { + allow_subject_alt_names_passthrough = true + allow_subject_passthrough = true + + cel_expression { + description = "Always true" + expression = "true" + location = "any.file.anywhere" + title = "Sample expression" + } + } + + labels = { + label-two = "value-two" + } + + passthrough_extensions { + additional_extensions { + object_id_path = [1, 6] + } + + known_extensions = ["EXTENDED_KEY_USAGE"] + } + + predefined_values { + additional_extensions { + object_id { + object_id_path = [1, 6] + } + + value = "c3RyaW5nCg==" + critical = true + } + + aia_ocsp_servers = ["string"] + + ca_options { + is_ca = false + max_issuer_path_length = 6 + } + + key_usage { + base_key_usage { + cert_sign = false + content_commitment = true + crl_sign = false + data_encipherment = true + decipher_only = true + digital_signature = true + encipher_only = true + key_agreement = true + key_encipherment = true + } + + extended_key_usage { + client_auth = true + code_signing = true + email_protection = true + ocsp_signing = true + server_auth = true + time_stamping = true + } + + unknown_extended_key_usages { + object_id_path = [1, 6] + } + } + + policy_ids { + object_id_path = [1, 6] + } + } + + project = "my-project-name" +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - + (Required) + The location for the resource + +* `name` - + (Required) + The resource name for this CertificateTemplate in the format `projects/*/locations/*/certificateTemplates/*`. + + + +The `object_id` block supports: + +* `object_id_path` - + (Required) + Required. The parts of an OID path. The most significant parts of the path come first. + +- - - + +* `description` - + (Optional) + Optional. A human-readable description of scenarios this template is intended for. + +* `identity_constraints` - + (Optional) + Optional. Describes constraints on identities that may appear in Certificates issued using this template. If this is omitted, then this template will not add restrictions on a certificate's identity. + +* `labels` - + (Optional) + Optional. Labels with user-defined metadata. + +* `passthrough_extensions` - + (Optional) + Optional. Describes the set of X.509 extensions that may appear in a Certificate issued using this CertificateTemplate. If a certificate request sets extensions that don't appear in the passthrough_extensions, those extensions will be dropped. If the issuing CaPool's IssuancePolicy defines baseline_values that don't appear here, the certificate issuance request will fail. If this is omitted, then this template will not add restrictions on a certificate's X.509 extensions. These constraints do not apply to X.509 extensions set in this CertificateTemplate's predefined_values. + +* `predefined_values` - + (Optional) + Optional. A set of X.509 values that will be applied to all issued certificates that use this template.
If the certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If the issuing CaPool's IssuancePolicy defines conflicting baseline_values for the same properties, the certificate issuance request will fail. + +* `project` - + (Optional) + The project for the resource + + + +The `identity_constraints` block supports: + +* `allow_subject_alt_names_passthrough` - + (Required) + Required. If this is true, the SubjectAltNames extension may be copied from a certificate request into the signed certificate. Otherwise, the requested SubjectAltNames will be discarded. + +* `allow_subject_passthrough` - + (Required) + Required. If this is true, the Subject field may be copied from a certificate request into the signed certificate. Otherwise, the requested Subject will be discarded. + +* `cel_expression` - + (Optional) + Optional. A CEL expression that may be used to validate the resolved X.509 Subject and/or Subject Alternative Name before a certificate is signed. To see the full allowed syntax and some examples, see https://cloud.google.com/certificate-authority-service/docs/using-cel + +The `cel_expression` block supports: + +* `description` - + (Optional) + Optional. Description of the expression. This is longer text that describes the expression, e.g. when it is hovered over in a UI. + +* `expression` - + (Optional) + Textual representation of an expression in Common Expression Language syntax. + +* `location` - + (Optional) + Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + +* `title` - + (Optional) + Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs that allow entering the expression. + +The `passthrough_extensions` block supports: + +* `additional_extensions` - + (Optional) + Optional. A set of ObjectIds identifying custom X.509 extensions. Will be combined with known_extensions to determine the full set of X.509 extensions. + +* `known_extensions` - + (Optional) + Optional. A set of named X.509 extensions. Will be combined with additional_extensions to determine the full set of X.509 extensions. + +The `additional_extensions` block supports: + +* `object_id_path` - + (Required) + Required. The parts of an OID path. The most significant parts of the path come first. + +The `predefined_values` block supports: + +* `additional_extensions` - + (Optional) + Optional. Describes custom X.509 extensions. + +* `aia_ocsp_servers` - + (Optional) + Optional. Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the "Authority Information Access" extension in the certificate. + +* `ca_options` - + (Optional) + Optional. Describes options in this X509Parameters that are relevant in a CA certificate. + +* `key_usage` - + (Optional) + Optional. Indicates the intended use for keys that correspond to a certificate. + +* `policy_ids` - + (Optional) + Optional. Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4. + +The `additional_extensions` block supports: + +* `critical` - + (Optional) + Optional. Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error). + +* `object_id` - + (Required) + Required. The OID for this X.509 extension. + +* `value` - + (Required) + Required. The value of this X.509 extension.
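In the basic example above, the extension `value` of `"c3RyaW5nCg=="` is the base64 encoding of the bytes `string\n`. A minimal sketch that derives the same value with Terraform's built-in `base64encode` function (the OID path and payload are illustrative only):

```hcl
additional_extensions {
  object_id {
    object_id_path = [1, 6]
  }

  # base64encode("string\n") evaluates to "c3RyaW5nCg==", matching the example above.
  value    = base64encode("string\n")
  critical = true
}
```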
+ +The `ca_options` block supports: + +* `is_ca` - + (Optional) + Optional. Refers to the "CA" X.509 extension, which is a boolean value. When this value is missing, the extension will be omitted from the CA certificate. + +* `max_issuer_path_length` - + (Optional) + Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. If this value is missing, the max path length will be omitted from the CA certificate. + +The `key_usage` block supports: + +* `base_key_usage` - + (Optional) + Describes high-level ways in which a key may be used. + +* `extended_key_usage` - + (Optional) + Detailed scenarios in which a key may be used. + +* `unknown_extended_key_usages` - + (Optional) + Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message. + +The `base_key_usage` block supports: + +* `cert_sign` - + (Optional) + The key may be used to sign certificates. + +* `content_commitment` - + (Optional) + The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation". + +* `crl_sign` - + (Optional) + The key may be used to sign certificate revocation lists. + +* `data_encipherment` - + (Optional) + The key may be used to encipher data. + +* `decipher_only` - + (Optional) + The key may be used to decipher only. + +* `digital_signature` - + (Optional) + The key may be used for digital signatures. + +* `encipher_only` - + (Optional) + The key may be used to encipher only. + +* `key_agreement` - + (Optional) + The key may be used in a key agreement protocol. + +* `key_encipherment` - + (Optional) + The key may be used to encipher other keys. + +The `extended_key_usage` block supports: + +* `client_auth` - + (Optional) + Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS. + +* `code_signing` - + (Optional) + Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication". + +* `email_protection` - + (Optional) + Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection". + +* `ocsp_signing` - + (Optional) + Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses". + +* `server_auth` - + (Optional) + Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS. + +* `time_stamping` - + (Optional) + Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time". + +The `unknown_extended_key_usages` block supports: + +* `object_id_path` - + (Required) + Required. The parts of an OID path. The most significant parts of the path come first. + +The `policy_ids` block supports: + +* `object_id_path` - + (Required) + Required. The parts of an OID path. The most significant parts of the path come first. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}` + +* `create_time` - + Output only. The time at which this CertificateTemplate was created. + +* `update_time` - + Output only. The time at which this CertificateTemplate was updated.
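As a small usage sketch, the computed attributes can be referenced elsewhere in a configuration, e.g. by exporting the template `id` from the basic example above (the output name is arbitrary):

```hcl
output "certificate_template_id" {
  # Resolves to projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}.
  value = google_privateca_certificate_template.primary.id
}
```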
+ +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +CertificateTemplate can be imported using any of these accepted formats: + +``` +$ terraform import google_privateca_certificate_template.default projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}} +$ terraform import google_privateca_certificate_template.default {{project}}/{{location}}/{{name}} +$ terraform import google_privateca_certificate_template.default {{location}}/{{name}} +``` + + + diff --git a/website/docs/r/recaptcha_enterprise_key.html.markdown b/website/docs/r/recaptcha_enterprise_key.html.markdown new file mode 100644 index 00000000000..ca52df8b086 --- /dev/null +++ b/website/docs/r/recaptcha_enterprise_key.html.markdown @@ -0,0 +1,267 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: DCL *** +# +# ---------------------------------------------------------------------------- +# +# This file is managed by Magic Modules (https:#github.com/GoogleCloudPlatform/magic-modules) +# and is based on the DCL (https:#github.com/GoogleCloudPlatform/declarative-resource-client-library). +# Changes will need to be made to the DCL or Magic Modules instead of here. +# +# We are not currently able to accept contributions to this file. If changes +# are required, please file an issue at https:#github.com/hashicorp/terraform-provider-google/issues/new/choose +# +# ---------------------------------------------------------------------------- +subcategory: "RecaptchaEnterprise" +layout: "google" +page_title: "Google: google_recaptcha_enterprise_key" +description: |- +The RecaptchaEnterprise Key resource +--- + +# google_recaptcha_enterprise_key + +The RecaptchaEnterprise Key resource + +## Example Usage - android_key +A basic test of a recaptcha enterprise key that can be used by Android apps +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + android_settings { + allow_all_package_names = true + allowed_package_names = [] + } + + labels = { + label-one = "value-one" + } + + project = "my-project-name" + + testing_options { + testing_score = 0.8 + } +} + + +``` +## Example Usage - ios_key +A basic test of a recaptcha enterprise key that can be used by iOS apps +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + ios_settings { + allow_all_bundle_ids = true + allowed_bundle_ids = [] + } + + labels = { + label-one = "value-one" + } + + project = "my-project-name" + + testing_options { + testing_score = 1 + } +} + + +``` +## Example Usage - minimal_key +A minimal test of a recaptcha enterprise key +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + labels = {} + project = "my-project-name" + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + } +} + + +``` +## Example Usage - web_key +A basic test of a recaptcha enterprise key that can be used by websites +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + labels = { + label-one = "value-one" + } + + project = "my-project-name" + + testing_options { + testing_challenge = "NOCAPTCHA" + testing_score = 0.5 + } + + web_settings { + integration_type = "CHECKBOX" +
allow_all_domains = true + allowed_domains = [] + challenge_security_preference = "USABILITY" + } +} + + +``` +## Example Usage - web_score_key +A basic test of a recaptcha enterprise key with score integration type that can be used by websites +```hcl +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "display-name-one" + + labels = { + label-one = "value-one" + } + + project = "my-project-name" + + testing_options { + testing_score = 0.5 + } + + web_settings { + integration_type = "SCORE" + allow_all_domains = true + allow_amp_traffic = false + allowed_domains = [] + } +} + + +``` + +## Argument Reference + +The following arguments are supported: + +* `display_name` - + (Required) + Human-readable display name of this key. Modifiable by user. + + + +- - - + +* `android_settings` - + (Optional) + Settings for keys that can be used by Android apps. + +* `ios_settings` - + (Optional) + Settings for keys that can be used by iOS apps. + +* `labels` - + (Optional) + See [Creating and managing labels](https://cloud.google.com/recaptcha-enterprise/docs/labels). + +* `project` - + (Optional) + The project for the resource + +* `testing_options` - + (Optional) + Options for user acceptance testing. + +* `web_settings` - + (Optional) + Settings for keys that can be used by websites. + + + +The `android_settings` block supports: + +* `allow_all_package_names` - + (Optional) + If set to true, it means allowed_package_names will not be enforced. + +* `allowed_package_names` - + (Optional) + Android package names of apps allowed to use the key. Example: 'com.companyname.appname' + +The `ios_settings` block supports: + +* `allow_all_bundle_ids` - + (Optional) + If set to true, it means allowed_bundle_ids will not be enforced. + +* `allowed_bundle_ids` - + (Optional) + iOS bundle ids of apps allowed to use the key. Example: 'com.companyname.productname.appname' + +The `testing_options` block supports: + +* `testing_challenge` - + (Optional) + For challenge-based keys only (CHECKBOX, INVISIBLE), all challenge requests for this site will return nocaptcha if NOCAPTCHA, or an unsolvable challenge if UNSOLVABLE_CHALLENGE. Possible values: TESTING_CHALLENGE_UNSPECIFIED, NOCAPTCHA, UNSOLVABLE_CHALLENGE + +* `testing_score` - + (Optional) + All assessments for this Key will return this score. Must be between 0 (likely not legitimate) and 1 (likely legitimate) inclusive. + +The `web_settings` block supports: + +* `allow_all_domains` - + (Optional) + If set to true, it means allowed_domains will not be enforced. + +* `allow_amp_traffic` - + (Optional) + If set to true, the key can be used on AMP (Accelerated Mobile Pages) websites. This is supported only for the SCORE integration type. + +* `allowed_domains` - + (Optional) + Domains or subdomains of websites allowed to use the key. All subdomains of an allowed domain are automatically allowed. A valid domain requires a host and must not include any path, port, query or fragment. Examples: 'example.com' or 'subdomain.example.com' + +* `challenge_security_preference` - + (Optional) + Settings for the frequency and difficulty at which this key triggers captcha challenges. This should only be specified for IntegrationTypes CHECKBOX and INVISIBLE. Possible values: CHALLENGE_SECURITY_PREFERENCE_UNSPECIFIED, USABILITY, BALANCE, SECURITY + +* `integration_type` - + (Required) + Required. Describes how this key is integrated with the website.
Possible values: SCORE, CHECKBOX, INVISIBLE + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/keys/{{name}}` + +* `create_time` - + The timestamp corresponding to the creation of this Key. + +* `name` - + The resource name for the Key in the format "projects/{project}/keys/{key}". + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 10 minutes. +- `update` - Default is 10 minutes. +- `delete` - Default is 10 minutes. + +## Import + +Key can be imported using any of these accepted formats: + +``` +$ terraform import google_recaptcha_enterprise_key.default projects/{{project}}/keys/{{name}} +$ terraform import google_recaptcha_enterprise_key.default {{project}}/{{name}} +$ terraform import google_recaptcha_enterprise_key.default {{name}} +``` + + + diff --git a/website/google.erb b/website/google.erb index 21a845382cb..b321f71212f 100644 --- a/website/google.erb +++ b/website/google.erb @@ -264,6 +264,22 @@ +
<%# Sidebar navigation additions (the HTML <li>/<a> markup was lost in extraction): new sections and resource links for AssuredWorkloads, Eventarc, NetworkConnectivity, OrgPolicy, OsConfig, Privateca, and RecaptchaEnterprise, plus new "Resources" sub-lists, inserted alongside the existing BigQuery, Filestore, NetworkManagement, and Resource Manager entries. %>