diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_datastream_static_ips.go b/mmv1/third_party/terraform/data_sources/data_source_google_datastream_static_ips.go index e378030530d2..9beaaf0fe64b 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_datastream_static_ips.go +++ b/mmv1/third_party/terraform/data_sources/data_source_google_datastream_static_ips.go @@ -50,7 +50,7 @@ func dataSourceGoogleDatastreamStaticIpsRead(d *schema.ResourceData, meta interf return err } - staticIps, err := paginatedListRequest(project, url, userAgent, config, flattenStaticIpsList) + staticIps, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenStaticIpsList) if err != nil { return fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) } diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_monitoring_uptime_check_ips.go b/mmv1/third_party/terraform/data_sources/data_source_google_monitoring_uptime_check_ips.go index 111b9de4fdd3..840a52e6cbbb 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_monitoring_uptime_check_ips.go +++ b/mmv1/third_party/terraform/data_sources/data_source_google_monitoring_uptime_check_ips.go @@ -46,7 +46,7 @@ func dataSourceGoogleMonitoringUptimeCheckIpsRead(d *schema.ResourceData, meta i url := "https://monitoring.googleapis.com/v3/uptimeCheckIps" - uptimeCheckIps, err := paginatedListRequest("", url, userAgent, config, flattenUptimeCheckIpsList) + uptimeCheckIps, err := tpgresource.PaginatedListRequest("", url, userAgent, config, flattenUptimeCheckIpsList) if err != nil { return fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) } diff --git a/mmv1/third_party/terraform/data_sources/data_source_tpu_tensorflow_versions.go b/mmv1/third_party/terraform/data_sources/data_source_tpu_tensorflow_versions.go index 40ffb241b57d..cb30d432e780 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_tpu_tensorflow_versions.go +++ b/mmv1/third_party/terraform/data_sources/data_source_tpu_tensorflow_versions.go @@ -55,7 +55,7 @@ func dataSourceTpuTensorFlowVersionsRead(d *schema.ResourceData, meta interface{ return err } - versionsRaw, err := paginatedListRequest(project, url, userAgent, config, flattenTpuTensorflowVersions) + versionsRaw, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenTpuTensorflowVersions) if err != nil { return fmt.Errorf("Error listing TPU Tensorflow versions: %s", err) } diff --git a/mmv1/third_party/terraform/resources/resource_cloudfunctions_function.go b/mmv1/third_party/terraform/resources/resource_cloudfunctions_function.go index bcf2de24d93a..5e9071d62560 100644 --- a/mmv1/third_party/terraform/resources/resource_cloudfunctions_function.go +++ b/mmv1/third_party/terraform/resources/resource_cloudfunctions_function.go @@ -1,1177 +1,7 @@ package google import ( - "regexp" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/terraform-provider-google/google/verify" - - "google.golang.org/api/cloudfunctions/v1" - - "fmt" - "log" - "net/url" - "strconv" - "strings" - "time" + "github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions" ) -var allowedIngressSettings = []string{ - "ALLOW_ALL", - "ALLOW_INTERNAL_AND_GCLB", - 
"ALLOW_INTERNAL_ONLY", -} - -var allowedVpcConnectorEgressSettings = []string{ - "ALL_TRAFFIC", - "PRIVATE_RANGES_ONLY", -} - -type cloudFunctionId struct { - Project string - Region string - Name string -} - -func (s *cloudFunctionId) cloudFunctionId() string { - return fmt.Sprintf("projects/%s/locations/%s/functions/%s", s.Project, s.Region, s.Name) -} - -// matches all international lower case letters, number, underscores and dashes. -var labelKeyRegex = regexp.MustCompile(`^[\p{Ll}0-9_-]+$`) - -func labelKeyValidator(val interface{}, key string) (warns []string, errs []error) { - if val == nil { - return - } - - m := val.(map[string]interface{}) - for k := range m { - if !labelKeyRegex.MatchString(k) { - errs = append(errs, fmt.Errorf("%q is an invalid label key. See https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements", k)) - } - } - return -} - -func (s *cloudFunctionId) locationId() string { - return fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Region) -} - -func parseCloudFunctionId(d *schema.ResourceData, config *transport_tpg.Config) (*cloudFunctionId, error) { - if err := tpgresource.ParseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - return &cloudFunctionId{ - Project: d.Get("project").(string), - Region: d.Get("region").(string), - Name: d.Get("name").(string), - }, nil -} - -// Differs from validateGCEName because Cloud Functions allow capital letters -// at start/end -func validateResourceCloudFunctionsFunctionName(v interface{}, k string) (ws []string, errors []error) { - re := `^(?:[a-zA-Z](?:[-_a-zA-Z0-9]{0,61}[a-zA-Z0-9])?)$` - return verify.ValidateRegexp(re)(v, k) -} - -func partsCompare(a, b, reg string) bool { - - regex := regexp.MustCompile(reg) - if regex.MatchString(a) && regex.MatchString(b) { - aParts := regex.FindStringSubmatch(a) - bParts := regex.FindStringSubmatch(b) - for i := 0; i < len(aParts); i++ { - if aParts[i] != bParts[i] { - return false - } - } - } else if regex.MatchString(a) { - aParts := regex.FindStringSubmatch(a) - if aParts[len(aParts)-1] != b { - return false - } - } else if regex.MatchString(b) { - bParts := regex.FindStringSubmatch(b) - if bParts[len(bParts)-1] != a { - return false - } - } else { - if a != b { - return false - } - } - - return true -} - -// based on CompareSelfLinkOrResourceName, but less reusable and allows multi-/ -// strings in the new state (config) part -func compareSelfLinkOrResourceNameWithMultipleParts(_, old, new string, _ *schema.ResourceData) bool { - // two formats based on expandEventTrigger() - regex1 := "projects/(.+)/databases/\\(default\\)/documents/(.+)" - regex2 := "projects/(.+)/(.+)/(.+)" - return partsCompare(old, new, regex1) || partsCompare(old, new, regex2) -} - -func ResourceCloudFunctionsFunction() *schema.Resource { - return &schema.Resource{ - Create: resourceCloudFunctionsCreate, - Read: resourceCloudFunctionsRead, - Update: resourceCloudFunctionsUpdate, - Delete: resourceCloudFunctionsDestroy, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(5 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(5 * time.Minute), - Delete: schema.DefaultTimeout(5 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - 
ForceNew: true, - Description: `A user-defined name of the function. Function names must be unique globally.`, - ValidateFunc: validateResourceCloudFunctionsFunctionName, - }, - - "build_worker_pool": { - Type: schema.TypeString, - Optional: true, - Description: `Name of the Cloud Build Custom Worker Pool that should be used to build the function.`, - }, - - "source_archive_bucket": { - Type: schema.TypeString, - Optional: true, - Description: `The GCS bucket containing the zip archive which contains the function.`, - }, - - "source_archive_object": { - Type: schema.TypeString, - Optional: true, - Description: `The source archive object (file) in archive bucket.`, - }, - - "source_repository": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Represents parameters related to source repository where a function is hosted. Cannot be set alongside source_archive_bucket or source_archive_object.`, - ConflictsWith: []string{"source_archive_bucket", "source_archive_object"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "url": { - Type: schema.TypeString, - Required: true, - Description: `The URL pointing to the hosted repository where the function is defined.`, - }, - "deployed_url": { - Type: schema.TypeString, - Computed: true, - Description: `The URL pointing to the hosted repository where the function was defined at the time of deployment.`, - }, - }, - }, - }, - - "docker_registry": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `Docker Registry to use for storing the function's Docker images. Allowed values are CONTAINER_REGISTRY (default) and ARTIFACT_REGISTRY.`, - }, - - "docker_repository": { - Type: schema.TypeString, - Optional: true, - Description: `User managed repository created in Artifact Registry optionally with a customer managed encryption key. If specified, deployments will use Artifact Registry for storing images built with Cloud Build.`, - }, - - "kms_key_name": { - Type: schema.TypeString, - Optional: true, - Description: `Resource name of a KMS crypto key (managed by the user) used to encrypt/decrypt function resources.`, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - Description: `Description of the function.`, - }, - - "available_memory_mb": { - Type: schema.TypeInt, - Optional: true, - Default: 256, - Description: `Memory (in MB), available to the function. Default value is 256. Possible values include 128, 256, 512, 1024, etc.`, - }, - - "timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - ValidateFunc: validation.IntBetween(1, 540), - Description: `Timeout (in seconds) for the function. Default value is 60 seconds. Cannot be more than 540 seconds.`, - }, - - "entry_point": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the function that will be executed when the Google Cloud Function is triggered.`, - }, - - "ingress_settings": { - Type: schema.TypeString, - Optional: true, - Default: "ALLOW_ALL", - ValidateFunc: validation.StringInSlice(allowedIngressSettings, true), - Description: `String value that controls what traffic can reach the function. Allowed values are ALLOW_ALL and ALLOW_INTERNAL_ONLY. 
Changes to this field will recreate the cloud function.`, - }, - - "vpc_connector_egress_settings": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(allowedVpcConnectorEgressSettings, true), - Description: `The egress settings for the connector, controlling what traffic is diverted through it. Allowed values are ALL_TRAFFIC and PRIVATE_RANGES_ONLY. Defaults to PRIVATE_RANGES_ONLY. If unset, this field preserves the previously set value.`, - }, - - "labels": { - Type: schema.TypeMap, - ValidateFunc: labelKeyValidator, - Optional: true, - Description: `A set of key/value label pairs to assign to the function. Label keys must follow the requirements at https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements.`, - }, - - "runtime": { - Type: schema.TypeString, - Required: true, - Description: `The runtime in which the function is going to run. Eg. "nodejs8", "nodejs10", "python37", "go111".`, - }, - - "service_account_email": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: ` If provided, the self-provided service account to run the function with.`, - }, - - "vpc_connector": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `The VPC Network Connector that this cloud function can connect to. It can be either the fully-qualified URI, or the short name of the network connector resource. The format of this field is projects/*/locations/*/connectors/*.`, - }, - - "environment_variables": { - Type: schema.TypeMap, - Optional: true, - Description: `A set of key/value environment variable pairs to assign to the function.`, - }, - - "build_environment_variables": { - Type: schema.TypeMap, - Optional: true, - Description: ` A set of key/value environment variable pairs available during build time.`, - }, - - "trigger_http": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Boolean variable. Any HTTP request (of a supported type) to the endpoint will trigger function execution. Supported HTTP request types are: POST, PUT, GET, DELETE, and OPTIONS. Endpoint is returned as https_trigger_url. Cannot be used with trigger_bucket and trigger_topic.`, - }, - - "event_trigger": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ConflictsWith: []string{"trigger_http"}, - MaxItems: 1, - Description: `A source that fires events in response to a condition in another service. Cannot be used with trigger_http.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "event_type": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - Description: `The type of event to observe. For example: "google.storage.object.finalize". See the documentation on calling Cloud Functions for a full reference of accepted triggers.`, - }, - "resource": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceNameWithMultipleParts, - Description: `The name or partial URI of the resource from which to observe events. 
For example, "myBucket" or "projects/my-project/topics/my-topic"`, - }, - "failure_policy": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Specifies policy for failed executions`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "retry": { - Type: schema.TypeBool, - // not strictly required, but this way an empty block can't be specified - Required: true, - Description: `Whether the function should be retried on failure. Defaults to false.`, - }, - }}, - }, - }, - }, - }, - - "https_trigger_url": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `URL which triggers function execution. Returned only if trigger_http is used.`, - }, - - "https_trigger_security_level": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `The security level for the function. Defaults to SECURE_OPTIONAL. Valid only if trigger_http is used.`, - }, - - "max_instances": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.IntAtLeast(0), - Description: `The limit on the maximum number of function instances that may coexist at a given time.`, - }, - - "min_instances": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), - Description: `The limit on the minimum number of function instances that may coexist at a given time.`, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Project of the function. If it is not provided, the provider project is used.`, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Region of function. If it is not provided, the provider region is used.`, - }, - - "secret_environment_variables": { - Type: schema.TypeList, - Optional: true, - Description: `Secret environment variables configuration`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - Description: `Name of the environment variable.`, - }, - "project_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as of the function.`, - }, - "secret": { - Type: schema.TypeString, - Required: true, - Description: `ID of the secret in secret manager (not the full resource name).`, - }, - "version": { - Type: schema.TypeString, - Required: true, - Description: `Version of the secret (version number or the string "latest"). It is recommended to use a numeric version for secret environment variables as any updates to the secret value is not reflected until new clones start.`, - }, - }, - }, - }, - - "secret_volumes": { - Type: schema.TypeList, - Optional: true, - Description: `Secret volumes configuration.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mount_path": { - Type: schema.TypeString, - Required: true, - Description: `The path within the container to mount the secret volume. For example, setting the mount_path as "/etc/secrets" would mount the secret value files under the "/etc/secrets" directory. This directory will also be completely shadowed and unavailable to mount any other secrets. 
Recommended mount paths: "/etc/secrets" Restricted mount paths: "/cloudsql", "/dev/log", "/pod", "/proc", "/var/log".`, - }, - "project_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Description: `Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as of the function.`, - }, - "secret": { - Type: schema.TypeString, - Required: true, - Description: `ID of the secret in secret manager (not the full resource name).`, - }, - "versions": { - Type: schema.TypeList, - Optional: true, - Description: `List of secret versions to mount for this secret. If empty, the "latest" version of the secret will be made available in a file named after the secret under the mount point.`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "path": { - Type: schema.TypeString, - Required: true, - Description: `Relative path of the file under the mount path where the secret value for this version will be fetched and made available. For example, setting the mount_path as "/etc/secrets" and path as "/secret_foo" would mount the secret value file at "/etc/secrets/secret_foo".`, - }, - "version": { - Type: schema.TypeString, - Required: true, - Description: `Version of the secret (version number or the string "latest"). It is preferable to use "latest" version with secret volumes as secret value changes are reflected immediately.`, - }, - }, - }, - }, - }, - }, - }, - "status": { - Type: schema.TypeString, - Computed: true, - Description: `Describes the current stage of a deployment.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - region, err := tpgresource.GetRegion(d, config) - if err != nil { - return err - } - - cloudFuncId := &cloudFunctionId{ - Project: project, - Region: region, - Name: d.Get("name").(string), - } - - function := &cloudfunctions.CloudFunction{ - Name: cloudFuncId.cloudFunctionId(), - Runtime: d.Get("runtime").(string), - ServiceAccountEmail: d.Get("service_account_email").(string), - ForceSendFields: []string{}, - } - - sourceRepos := d.Get("source_repository").([]interface{}) - if len(sourceRepos) > 0 { - function.SourceRepository = expandSourceRepository(sourceRepos) - } else { - sourceArchiveBucket := d.Get("source_archive_bucket").(string) - sourceArchiveObj := d.Get("source_archive_object").(string) - if sourceArchiveBucket == "" || sourceArchiveObj == "" { - return fmt.Errorf("either source_repository or both of source_archive_bucket+source_archive_object must be set") - } - function.SourceArchiveUrl = fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) - } - - secretEnv := d.Get("secret_environment_variables").([]interface{}) - if len(secretEnv) > 0 { - function.SecretEnvironmentVariables = expandSecretEnvironmentVariables(secretEnv) - } - - secretVolume := d.Get("secret_volumes").([]interface{}) - if len(secretVolume) > 0 { - function.SecretVolumes = expandSecretVolumes(secretVolume) - } - - if v, ok := d.GetOk("available_memory_mb"); ok { - availableMemoryMb := v.(int) - function.AvailableMemoryMb = 
int64(availableMemoryMb) - } - - if v, ok := d.GetOk("description"); ok { - function.Description = v.(string) - } - - if v, ok := d.GetOk("build_worker_pool"); ok { - function.BuildWorkerPool = v.(string) - } - - if v, ok := d.GetOk("entry_point"); ok { - function.EntryPoint = v.(string) - } - - if v, ok := d.GetOk("timeout"); ok { - function.Timeout = fmt.Sprintf("%vs", v.(int)) - } - - if v, ok := d.GetOk("event_trigger"); ok { - function.EventTrigger = expandEventTrigger(v.([]interface{}), project) - } else if v, ok := d.GetOk("trigger_http"); ok && v.(bool) { - function.HttpsTrigger = &cloudfunctions.HttpsTrigger{} - function.HttpsTrigger.SecurityLevel = d.Get("https_trigger_security_level").(string) - } else { - return fmt.Errorf("One of `event_trigger` or `trigger_http` is required: " + - "You must specify a trigger when deploying a new function.") - } - - if v, ok := d.GetOk("ingress_settings"); ok { - function.IngressSettings = v.(string) - } - - if _, ok := d.GetOk("labels"); ok { - function.Labels = tpgresource.ExpandLabels(d) - } - - if _, ok := d.GetOk("environment_variables"); ok { - function.EnvironmentVariables = tpgresource.ExpandEnvironmentVariables(d) - } - - if _, ok := d.GetOk("build_environment_variables"); ok { - function.BuildEnvironmentVariables = tpgresource.ExpandBuildEnvironmentVariables(d) - } - - if v, ok := d.GetOk("vpc_connector"); ok { - function.VpcConnector = v.(string) - } - - if v, ok := d.GetOk("vpc_connector_egress_settings"); ok { - function.VpcConnectorEgressSettings = v.(string) - } - - if v, ok := d.GetOk("docker_registry"); ok { - function.DockerRegistry = v.(string) - } - - if v, ok := d.GetOk("docker_repository"); ok { - function.DockerRepository = v.(string) - } - - if v, ok := d.GetOk("kms_key_name"); ok { - function.KmsKeyName = v.(string) - } - - if v, ok := d.GetOk("max_instances"); ok { - function.MaxInstances = int64(v.(int)) - } - - if v, ok := d.GetOk("min_instances"); ok { - function.MinInstances = int64(v.(int)) - } - - log.Printf("[DEBUG] Creating cloud function: %s", function.Name) - - // We retry the whole create-and-wait because Cloud Functions - // will sometimes fail a creation operation entirely if it fails to pull - // source code and we need to try the whole creation again. 
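// [Editor's note — annotation, not part of the patch] The create path just
// below retries the entire create-and-wait sequence, gated by the
// ErrorRetryPredicates entry. A minimal sketch of what such a predicate could
// look like, assuming transport_tpg.RetryErrorPredicateFunc has the signature
// func(error) (bool, string); the matched substring is illustrative only, not
// the provider's exact error message:
package sketch

import (
	"errors"
	"strings"

	"google.golang.org/api/googleapi"
)

func isSourceCodeErrorSketch(err error) (bool, string) {
	var gerr *googleapi.Error
	// Restart the whole create only for the transient "could not pull
	// source" failure; every other error should surface immediately.
	if errors.As(err, &gerr) && gerr.Code == 400 &&
		strings.Contains(gerr.Body, "source code") { // assumed substring
		return true, "retrying create: transient source-code pull failure"
	}
	return false, ""
}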
- rerr := transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: func() error { - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Create( - cloudFuncId.locationId(), function).Do() - if err != nil { - return err - } - - // Name of function should be unique - d.SetId(cloudFuncId.cloudFunctionId()) - - return cloudFunctionsOperationWait(config, op, "Creating CloudFunctions Function", userAgent, - d.Timeout(schema.TimeoutCreate)) - }, - Timeout: d.Timeout(schema.TimeoutCreate), - ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{IsCloudFunctionsSourceCodeError}, - }) - if rerr != nil { - return rerr - } - log.Printf("[DEBUG] Finished creating cloud function: %s", function.Name) - return resourceCloudFunctionsRead(d, meta) -} - -func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - cloudFuncId, err := parseCloudFunctionId(d, config) - if err != nil { - return err - } - - function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) - } - - if err := d.Set("name", cloudFuncId.Name); err != nil { - return fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", function.Description); err != nil { - return fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("build_worker_pool", function.BuildWorkerPool); err != nil { - return fmt.Errorf("Error setting build_worker_pool: %s", err) - } - if err := d.Set("entry_point", function.EntryPoint); err != nil { - return fmt.Errorf("Error setting entry_point: %s", err) - } - if err := d.Set("available_memory_mb", function.AvailableMemoryMb); err != nil { - return fmt.Errorf("Error setting available_memory_mb: %s", err) - } - sRemoved := strings.Replace(function.Timeout, "s", "", -1) - timeout, err := strconv.Atoi(sRemoved) - if err != nil { - return err - } - if err := d.Set("timeout", timeout); err != nil { - return fmt.Errorf("Error setting timeout: %s", err) - } - if err := d.Set("ingress_settings", function.IngressSettings); err != nil { - return fmt.Errorf("Error setting ingress_settings: %s", err) - } - if err := d.Set("labels", function.Labels); err != nil { - return fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("runtime", function.Runtime); err != nil { - return fmt.Errorf("Error setting runtime: %s", err) - } - if err := d.Set("service_account_email", function.ServiceAccountEmail); err != nil { - return fmt.Errorf("Error setting service_account_email: %s", err) - } - if err := d.Set("environment_variables", function.EnvironmentVariables); err != nil { - return fmt.Errorf("Error setting environment_variables: %s", err) - } - if err := d.Set("vpc_connector", function.VpcConnector); err != nil { - return fmt.Errorf("Error setting vpc_connector: %s", err) - } - if err := d.Set("vpc_connector_egress_settings", function.VpcConnectorEgressSettings); err != nil { - return fmt.Errorf("Error setting vpc_connector_egress_settings: %s", err) - } - if function.SourceArchiveUrl != "" { - // sourceArchiveUrl should always be a Google Cloud Storage URL (e.g. 
gs://bucket/object) - // https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions - sourceURL, err := url.Parse(function.SourceArchiveUrl) - if err != nil { - return err - } - bucket := sourceURL.Host - object := strings.TrimLeft(sourceURL.Path, "/") - if err := d.Set("source_archive_bucket", bucket); err != nil { - return fmt.Errorf("Error setting source_archive_bucket: %s", err) - } - if err := d.Set("source_archive_object", object); err != nil { - return fmt.Errorf("Error setting source_archive_object: %s", err) - } - } - if err := d.Set("source_repository", flattenSourceRepository(function.SourceRepository)); err != nil { - return fmt.Errorf("Error setting source_repository: %s", err) - } - - if err := d.Set("secret_environment_variables", flattenSecretEnvironmentVariables(function.SecretEnvironmentVariables)); err != nil { - return fmt.Errorf("Error setting secret_environment_variables: %s", err) - } - - if err := d.Set("secret_volumes", flattenSecretVolumes(function.SecretVolumes)); err != nil { - return fmt.Errorf("Error setting secret_volumes: %s", err) - } - - if err := d.Set("status", function.Status); err != nil { - return fmt.Errorf("Error setting status: %s", err) - } - - if function.HttpsTrigger != nil { - if err := d.Set("trigger_http", true); err != nil { - return fmt.Errorf("Error setting trigger_http: %s", err) - } - if err := d.Set("https_trigger_url", function.HttpsTrigger.Url); err != nil { - return fmt.Errorf("Error setting https_trigger_url: %s", err) - } - if err := d.Set("https_trigger_security_level", function.HttpsTrigger.SecurityLevel); err != nil { - return fmt.Errorf("Error setting https_trigger_security_level: %s", err) - } - } - - if err := d.Set("event_trigger", flattenEventTrigger(function.EventTrigger)); err != nil { - return fmt.Errorf("Error setting event_trigger: %s", err) - } - if err := d.Set("docker_registry", function.DockerRegistry); err != nil { - return fmt.Errorf("Error setting docker_registry: %s", err) - } - if err := d.Set("docker_repository", function.DockerRepository); err != nil { - return fmt.Errorf("Error setting docker_repository: %s", err) - } - if err := d.Set("kms_key_name", function.KmsKeyName); err != nil { - return fmt.Errorf("Error setting kms_key_name: %s", err) - } - if err := d.Set("max_instances", function.MaxInstances); err != nil { - return fmt.Errorf("Error setting max_instances: %s", err) - } - if err := d.Set("min_instances", function.MinInstances); err != nil { - return fmt.Errorf("Error setting min_instances: %s", err) - } - if err := d.Set("region", cloudFuncId.Region); err != nil { - return fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("project", cloudFuncId.Project); err != nil { - return fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) error { - log.Printf("[DEBUG]: Updating google_cloudfunctions_function") - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - project, err := tpgresource.GetProject(d, config) - if err != nil { - return err - } - - cloudFuncId, err := parseCloudFunctionId(d, config) - if err != nil { - return err - } - - // The full function needs to supplied in the PATCH call to evaluate some Organization Policies. 
https://github.com/hashicorp/terraform-provider-google/issues/6603 - function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) - } - - // The full function may contain a reference to manually uploaded code if the function was imported from gcloud - // This does not work with Terraform, so zero it out from the function if it exists. See https://github.com/hashicorp/terraform-provider-google/issues/7921 - function.SourceUploadUrl = "" - - d.Partial(true) - - var updateMaskArr []string - if d.HasChange("available_memory_mb") { - availableMemoryMb := d.Get("available_memory_mb").(int) - function.AvailableMemoryMb = int64(availableMemoryMb) - updateMaskArr = append(updateMaskArr, "availableMemoryMb") - } - - if d.HasChange("source_archive_bucket") || d.HasChange("source_archive_object") { - sourceArchiveBucket := d.Get("source_archive_bucket").(string) - sourceArchiveObj := d.Get("source_archive_object").(string) - function.SourceArchiveUrl = fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) - updateMaskArr = append(updateMaskArr, "sourceArchiveUrl") - } - - if d.HasChange("source_repository") { - function.SourceRepository = expandSourceRepository(d.Get("source_repository").([]interface{})) - updateMaskArr = append(updateMaskArr, "sourceRepository") - } - - if d.HasChange("secret_environment_variables") { - function.SecretEnvironmentVariables = expandSecretEnvironmentVariables(d.Get("secret_environment_variables").([]interface{})) - updateMaskArr = append(updateMaskArr, "secretEnvironmentVariables") - } - - if d.HasChange("secret_volumes") { - function.SecretVolumes = expandSecretVolumes(d.Get("secret_volumes").([]interface{})) - updateMaskArr = append(updateMaskArr, "secretVolumes") - } - - if d.HasChange("description") { - function.Description = d.Get("description").(string) - updateMaskArr = append(updateMaskArr, "description") - } - - if d.HasChange("build_worker_pool") { - function.BuildWorkerPool = d.Get("build_worker_pool").(string) - updateMaskArr = append(updateMaskArr, "build_worker_pool") - } - - if d.HasChange("timeout") { - function.Timeout = fmt.Sprintf("%vs", d.Get("timeout").(int)) - updateMaskArr = append(updateMaskArr, "timeout") - } - - if d.HasChange("ingress_settings") { - function.IngressSettings = d.Get("ingress_settings").(string) - updateMaskArr = append(updateMaskArr, "ingressSettings") - } - - if d.HasChange("labels") { - function.Labels = tpgresource.ExpandLabels(d) - updateMaskArr = append(updateMaskArr, "labels") - } - - if d.HasChange("runtime") { - function.Runtime = d.Get("runtime").(string) - updateMaskArr = append(updateMaskArr, "runtime") - } - - if d.HasChange("environment_variables") { - function.EnvironmentVariables = tpgresource.ExpandEnvironmentVariables(d) - updateMaskArr = append(updateMaskArr, "environmentVariables") - } - - if d.HasChange("build_environment_variables") { - function.BuildEnvironmentVariables = tpgresource.ExpandBuildEnvironmentVariables(d) - updateMaskArr = append(updateMaskArr, "buildEnvironmentVariables") - } - - if d.HasChange("vpc_connector") { - function.VpcConnector = d.Get("vpc_connector").(string) - updateMaskArr = append(updateMaskArr, "vpcConnector") - } - - if d.HasChange("vpc_connector_egress_settings") { - function.VpcConnectorEgressSettings = d.Get("vpc_connector_egress_settings").(string) - 
updateMaskArr = append(updateMaskArr, "vpcConnectorEgressSettings") - } - - if d.HasChange("event_trigger") { - function.EventTrigger = expandEventTrigger(d.Get("event_trigger").([]interface{}), project) - updateMaskArr = append(updateMaskArr, "eventTrigger", "eventTrigger.failurePolicy.retry") - } - - if d.HasChange("https_trigger_security_level") { - if function.HttpsTrigger == nil { - function.HttpsTrigger = &cloudfunctions.HttpsTrigger{} - } - function.HttpsTrigger.SecurityLevel = d.Get("https_trigger_security_level").(string) - updateMaskArr = append(updateMaskArr, "httpsTrigger", "httpsTrigger.securityLevel") - } - - if d.HasChange("docker_registry") { - function.DockerRegistry = d.Get("docker_registry").(string) - updateMaskArr = append(updateMaskArr, "dockerRegistry") - } - - if d.HasChange("docker_repository") { - function.DockerRepository = d.Get("docker_repository").(string) - updateMaskArr = append(updateMaskArr, "dockerRepository") - } - - if d.HasChange("kms_key_name") { - function.KmsKeyName = d.Get("kms_key_name").(string) - updateMaskArr = append(updateMaskArr, "kmsKeyName") - } - - if d.HasChange("max_instances") { - function.MaxInstances = int64(d.Get("max_instances").(int)) - updateMaskArr = append(updateMaskArr, "maxInstances") - } - - if d.HasChange("min_instances") { - function.MinInstances = int64(d.Get("min_instances").(int)) - updateMaskArr = append(updateMaskArr, "minInstances") - } - - if len(updateMaskArr) > 0 { - log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function) - updateMask := strings.Join(updateMaskArr, ",") - rerr := transport_tpg.Retry(transport_tpg.RetryOptions{ - RetryFunc: func() error { - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Patch(function.Name, function). - UpdateMask(updateMask).Do() - if err != nil { - return err - } - - return cloudFunctionsOperationWait(config, op, "Updating CloudFunctions Function", userAgent, - d.Timeout(schema.TimeoutUpdate)) - }, - Timeout: d.Timeout(schema.TimeoutUpdate), - }) - if rerr != nil { - return fmt.Errorf("Error while updating cloudfunction configuration: %s", rerr) - } - } - d.Partial(false) - - return resourceCloudFunctionsRead(d, meta) -} - -func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - cloudFuncId, err := parseCloudFunctionId(d, config) - if err != nil { - return err - } - - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Delete(cloudFuncId.cloudFunctionId()).Do() - if err != nil { - return err - } - err = cloudFunctionsOperationWait(config, op, "Deleting CloudFunctions Function", userAgent, - d.Timeout(schema.TimeoutDelete)) - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -func expandEventTrigger(configured []interface{}, project string) *cloudfunctions.EventTrigger { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - data := configured[0].(map[string]interface{}) - eventType := data["event_type"].(string) - resource := data["resource"].(string) - - // if resource starts with "projects/", we can reasonably assume it's a - // partial URI. Otherwise, it's a shortname. Construct a partial URI based - // on the event type if so. 
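// [Editor's note — annotation, not part of the patch] A worked example of the
// shortname-to-partial-URI expansion performed by the switch below, mirroring
// only its Pub/Sub branch; the project and resource names are hypothetical:
package sketch

import (
	"fmt"
	"strings"
)

func expandPubSubResource(project, eventType, resource string) string {
	if strings.HasPrefix(resource, "projects/") {
		return resource // already a partial URI, passed through unchanged
	}
	if strings.HasPrefix(eventType, "google.pubsub.topic.") {
		return fmt.Sprintf("projects/%s/topics/%s", project, resource)
	}
	return resource
}

// Usage:
//   expandPubSubResource("my-proj", "google.pubsub.topic.publish", "my-topic")
//     == "projects/my-proj/topics/my-topic"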
- if !strings.HasPrefix(resource, "projects/") { - shape := "" - switch { - case strings.HasPrefix(eventType, "google.storage.object."): - shape = "projects/%s/buckets/%s" - case strings.HasPrefix(eventType, "google.pubsub.topic."): - shape = "projects/%s/topics/%s" - // Legacy style triggers - case strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"): - // Note that this is an uncommon way to refer to buckets; normally, - // you'd use to the global URL of the bucket and not the project - // scoped one. - shape = "projects/%s/buckets/%s" - case strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"): - shape = "projects/%s/topics/%s" - case strings.HasPrefix(eventType, "providers/cloud.firestore/eventTypes/"): - // Firestore doesn't not yet support multiple databases, so "(default)" is assumed. - // https://cloud.google.com/functions/docs/calling/cloud-firestore#deploying_your_function - shape = "projects/%s/databases/(default)/documents/%s" - } - - resource = fmt.Sprintf(shape, project, resource) - } - - return &cloudfunctions.EventTrigger{ - EventType: eventType, - Resource: resource, - FailurePolicy: expandFailurePolicy(data["failure_policy"].([]interface{})), - } -} - -func flattenEventTrigger(eventTrigger *cloudfunctions.EventTrigger) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - if eventTrigger == nil { - return result - } - - result = append(result, map[string]interface{}{ - "event_type": eventTrigger.EventType, - "resource": eventTrigger.Resource, - "failure_policy": flattenFailurePolicy(eventTrigger.FailurePolicy), - }) - - return result -} - -func expandFailurePolicy(configured []interface{}) *cloudfunctions.FailurePolicy { - if len(configured) == 0 || configured[0] == nil { - return &cloudfunctions.FailurePolicy{} - } - - if data := configured[0].(map[string]interface{}); data["retry"].(bool) { - return &cloudfunctions.FailurePolicy{ - Retry: &cloudfunctions.Retry{}, - } - } - - return nil -} - -func flattenFailurePolicy(failurePolicy *cloudfunctions.FailurePolicy) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - if failurePolicy == nil { - return nil - } - - result = append(result, map[string]interface{}{ - "retry": failurePolicy.Retry != nil, - }) - - return result -} - -func expandSourceRepository(configured []interface{}) *cloudfunctions.SourceRepository { - if len(configured) == 0 || configured[0] == nil { - return &cloudfunctions.SourceRepository{} - } - - data := configured[0].(map[string]interface{}) - return &cloudfunctions.SourceRepository{ - Url: data["url"].(string), - } -} - -func flattenSourceRepository(sourceRepo *cloudfunctions.SourceRepository) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - if sourceRepo == nil { - return nil - } - - result = append(result, map[string]interface{}{ - "url": sourceRepo.Url, - "deployed_url": sourceRepo.DeployedUrl, - }) - - return result -} - -func expandSecretEnvironmentVariables(configured []interface{}) []*cloudfunctions.SecretEnvVar { - if len(configured) == 0 { - return nil - } - result := make([]*cloudfunctions.SecretEnvVar, 0, len(configured)) - for _, e := range configured { - data := e.(map[string]interface{}) - result = append(result, &cloudfunctions.SecretEnvVar{ - Key: data["key"].(string), - ProjectId: data["project_id"].(string), - Secret: data["secret"].(string), - Version: data["version"].(string), - }) - } - return result -} - -func flattenSecretEnvironmentVariables(envVars 
[]*cloudfunctions.SecretEnvVar) []map[string]interface{} { - if envVars == nil { - return nil - } - var result []map[string]interface{} - - for _, envVar := range envVars { - if envVar != nil { - data := map[string]interface{}{ - "key": envVar.Key, - "project_id": envVar.ProjectId, - "secret": envVar.Secret, - "version": envVar.Version, - } - result = append(result, data) - } - } - return result -} - -func expandSecretVolumes(configured []interface{}) []*cloudfunctions.SecretVolume { - if len(configured) == 0 { - return nil - } - result := make([]*cloudfunctions.SecretVolume, 0, len(configured)) - for _, e := range configured { - data := e.(map[string]interface{}) - result = append(result, &cloudfunctions.SecretVolume{ - MountPath: data["mount_path"].(string), - ProjectId: data["project_id"].(string), - Secret: data["secret"].(string), - Versions: expandSecretVersion(data["versions"].([]interface{})), //TODO - }) - } - return result -} - -func flattenSecretVolumes(secretVolumes []*cloudfunctions.SecretVolume) []map[string]interface{} { - if secretVolumes == nil { - return nil - } - var result []map[string]interface{} - - for _, secretVolume := range secretVolumes { - if secretVolume != nil { - data := map[string]interface{}{ - "mount_path": secretVolume.MountPath, - "project_id": secretVolume.ProjectId, - "secret": secretVolume.Secret, - "versions": flattenSecretVersion(secretVolume.Versions), - } - result = append(result, data) - } - } - return result -} - -func expandSecretVersion(configured []interface{}) []*cloudfunctions.SecretVersion { - if len(configured) == 0 { - return nil - } - result := make([]*cloudfunctions.SecretVersion, 0, len(configured)) - for _, e := range configured { - data := e.(map[string]interface{}) - result = append(result, &cloudfunctions.SecretVersion{ - Path: data["path"].(string), - Version: data["version"].(string), - }) - } - return result -} - -func flattenSecretVersion(secretVersions []*cloudfunctions.SecretVersion) []map[string]interface{} { - if secretVersions == nil { - return nil - } - var result []map[string]interface{} - - for _, secretVersion := range secretVersions { - if secretVersion != nil { - data := map[string]interface{}{ - "path": secretVersion.Path, - "version": secretVersion.Version, - } - result = append(result, data) - } - } - return result -} +type cloudFunctionId = cloudfunctions.CloudFunctionId diff --git a/mmv1/third_party/terraform/resources/resource_service_networking_connection.go b/mmv1/third_party/terraform/resources/resource_service_networking_connection.go index 31f13502b45c..7c7a8e24ea2c 100644 --- a/mmv1/third_party/terraform/resources/resource_service_networking_connection.go +++ b/mmv1/third_party/terraform/resources/resource_service_networking_connection.go @@ -1,421 +1,16 @@ package google import ( - "fmt" - "log" - "net/url" - "regexp" - "strings" - "time" - - "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/servicenetworking/v1" ) -func ResourceServiceNetworkingConnection() *schema.Resource { - return &schema.Resource{ - Create: resourceServiceNetworkingConnectionCreate, - Read: resourceServiceNetworkingConnectionRead, - Update: resourceServiceNetworkingConnectionUpdate, - Delete: 
resourceServiceNetworkingConnectionDelete, - Importer: &schema.ResourceImporter{ - State: resourceServiceNetworkingConnectionImportState, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Update: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, - - Schema: map[string]*schema.Schema{ - "network": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, - Description: `Name of VPC network connected with service producers using VPC peering.`, - }, - // NOTE(craigatgoogle): This field is weird, it's required to make the Insert/List calls as a parameter - // named "parent", however it's also defined in the response as an output field called "peering", which - // uses "-" as a delimiter instead of ".". To alleviate user confusion I've opted to model the gcloud - // CLI's approach, calling the field "service" and accepting the same format as the CLI with the "." - // delimiter. - // See: https://cloud.google.com/vpc/docs/configure-private-services-access#creating-connection - "service": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `Provider peering service that is managing peering connectivity for a service provider organization. For Google services that support this functionality it is 'servicenetworking.googleapis.com'.`, - }, - "reserved_peering_ranges": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Description: `Named IP address range(s) of PEERING type reserved for this service provider. Note that invoking this method with a different range when connection is already established will not reallocate already provisioned service producer subnetworks.`, - }, - "peering": { - Type: schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) - } - - connection := &servicenetworking.Connection{ - Network: serviceNetworkingNetworkName, - ReservedPeeringRanges: tpgresource.ConvertStringArr(d.Get("reserved_peering_ranges").([]interface{})), - } - - networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) - if err != nil { - return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - project := networkFieldValue.Project - - parentService := formatParentService(d.Get("service").(string)) - // We use Patch instead of Create, because we're getting - // "Error waiting for Create Service Networking Connection: - // Error code 9, message: Cannot modify allocated ranges in - // CreateConnection. Please use UpdateConnection." - // if we're creating peerings to more than one VPC (like two - // CloudSQL instances within one project, peered with two - // clusters.) 
- // - // This is a workaround for: - // https://issuetracker.google.com/issues/131908322 - // - // The API docs don't specify that you can do connections/-, - // but that's what gcloud does, and it's easier than grabbing - // the connection name. - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - project = bp - } - - createCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) - if config.UserProjectOverride { - createCall.Header().Add("X-Goog-User-Project", project) - } - op, err := createCall.Do() - if err != nil { - return err - } - - if err := ServiceNetworkingOperationWaitTime(config, op, "Create Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { - return err - } - - connectionId := &connectionId{ - Network: network, - Service: d.Get("service").(string), - } - - d.SetId(connectionId.Id()) - return resourceServiceNetworkingConnectionRead(d, meta) -} - -func resourceServiceNetworkingConnectionRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - connectionId, err := parseConnectionId(d.Id()) - if err != nil { - return errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) - } - - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, connectionId.Network, userAgent) - if err != nil { - return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) - } - - network := d.Get("network").(string) - networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) - if err != nil { - return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - project := networkFieldValue.Project - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - project = bp - } - - parentService := formatParentService(connectionId.Service) - readCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.List(parentService).Network(serviceNetworkingNetworkName) - if config.UserProjectOverride { - readCall.Header().Add("X-Goog-User-Project", project) - } - response, err := readCall.Do() - if err != nil { - return err - } - - var connection *servicenetworking.Connection - for _, c := range response.Connections { - if c.Network == serviceNetworkingNetworkName { - connection = c - break - } - } - - if connection == nil { - d.SetId("") - log.Printf("[WARNING] Failed to find Service Networking Connection, network: %s service: %s", connectionId.Network, connectionId.Service) - return nil - } - - if err := d.Set("network", connectionId.Network); err != nil { - return fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("service", connectionId.Service); err != nil { - return fmt.Errorf("Error setting service: %s", err) - } - if err := d.Set("peering", connection.Peering); err != nil { - return fmt.Errorf("Error setting peering: %s", err) - } - if err := d.Set("reserved_peering_ranges", connection.ReservedPeeringRanges); err != nil { - return fmt.Errorf("Error setting reserved_peering_ranges: %s", err) - } - return nil -} - -func resourceServiceNetworkingConnectionUpdate(d 
*schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - connectionId, err := parseConnectionId(d.Id()) - if err != nil { - return errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) - } - - parentService := formatParentService(connectionId.Service) - - if d.HasChange("reserved_peering_ranges") { - network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) - } - - connection := &servicenetworking.Connection{ - Network: serviceNetworkingNetworkName, - ReservedPeeringRanges: tpgresource.ConvertStringArr(d.Get("reserved_peering_ranges").([]interface{})), - } - - networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) - if err != nil { - return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - project := networkFieldValue.Project - - // The API docs don't specify that you can do connections/-, but that's what gcloud does, - // and it's easier than grabbing the connection name. - - // err == nil indicates that the billing_project value was found - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - project = bp - } - - patchCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) - if config.UserProjectOverride { - patchCall.Header().Add("X-Goog-User-Project", project) - } - op, err := patchCall.Do() - if err != nil { - return err - } - if err := ServiceNetworkingOperationWaitTime(config, op, "Update Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutUpdate)); err != nil { - return err - } - } - return resourceServiceNetworkingConnectionRead(d, meta) -} - -func resourceServiceNetworkingConnectionDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*transport_tpg.Config) - userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) - if err != nil { - return err - } - - network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - peering := d.Get("peering").(string) - obj["name"] = peering - url := fmt.Sprintf("%s%s/removePeering", config.ComputeBasePath, serviceNetworkingNetworkName) - - networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) - if err != nil { - return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - - project := networkFieldValue.Project - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "POST", - Project: project, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutDelete), - }) - if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ServiceNetworkingConnection %q", d.Id())) - } - - op := &compute.Operation{} - err = tpgresource.Convert(res, op) - if err != nil { - return err - } - - err = ComputeOperationWaitTime( - config, op, project, "Updating Network", userAgent, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return err - } - - 
d.SetId("") - log.Printf("[INFO] Service network connection removed.") - - return nil -} - -func resourceServiceNetworkingConnectionImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - connectionId, err := parseConnectionId(d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("network", connectionId.Network); err != nil { - return nil, fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("service", connectionId.Service); err != nil { - return nil, fmt.Errorf("Error setting service: %s", err) - } - return []*schema.ResourceData{d}, nil -} - -// NOTE(craigatgoogle): The Connection resource in this API doesn't have an Id field, so inorder -// to support the Read method, we create an Id using the tuple(Network, Service). -type connectionId struct { - Network string - Service string -} - -func (id *connectionId) Id() string { - return fmt.Sprintf("%s:%s", url.QueryEscape(id.Network), url.QueryEscape(id.Service)) -} - -func parseConnectionId(id string) (*connectionId, error) { - res := strings.Split(id, ":") - - if len(res) != 2 { - return nil, fmt.Errorf("Failed to parse service networking connection id, value: %s", id) - } - - network, err := url.QueryUnescape(res[0]) - if err != nil { - return nil, errwrap.Wrapf("Failed to parse service networking connection id, invalid network, err: {{err}}", err) - } else if len(network) == 0 { - return nil, fmt.Errorf("Failed to parse service networking connection id, empty network") - } - - service, err := url.QueryUnescape(res[1]) - if err != nil { - return nil, errwrap.Wrapf("Failed to parse service networking connection id, invalid service, err: {{err}}", err) - } else if len(service) == 0 { - return nil, fmt.Errorf("Failed to parse service networking connection id, empty service") - } - - return &connectionId{ - Network: network, - Service: service, - }, nil -} - // NOTE(craigatgoogle): An out of band aspect of this API is that it uses a unique formatting of network // different from the standard self_link URI. It requires a call to the resource manager to get the project // number for the current project. func retrieveServiceNetworkingNetworkName(d *schema.ResourceData, config *transport_tpg.Config, network, userAgent string) (string, error) { - networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) - if err != nil { - return "", errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - - pid := networkFieldValue.Project - if pid == "" { - return "", fmt.Errorf("Could not determine project") - } - log.Printf("[DEBUG] Retrieving project number by doing a GET with the project id, as required by service networking") - // err == nil indicates that the billing_project value was found - billingProject := pid - if bp, err := tpgresource.GetBillingProject(d, config); err == nil { - billingProject = bp - } - - getProjectCall := config.NewResourceManagerClient(userAgent).Projects.Get(pid) - if config.UserProjectOverride { - getProjectCall.Header().Add("X-Goog-User-Project", billingProject) - } - project, err := getProjectCall.Do() - if err != nil { - // note: returning a wrapped error is part of this method's contract! 
- // https://blog.golang.org/go1.13-errors - return "", fmt.Errorf("Failed to retrieve project, pid: %s, err: %w", pid, err) - } - - networkName := networkFieldValue.Name - if networkName == "" { - return "", fmt.Errorf("Failed to parse network") - } - - // return the network name formatting unique to this API - return fmt.Sprintf("projects/%v/global/networks/%v", project.ProjectNumber, networkName), nil - -} - -const parentServicePattern = "^services/.+$" + return servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) -// NOTE(craigatgoogle): An out of band aspect of this API is that it requires the service name to be -// formatted as "services/" -func formatParentService(service string) string { - r := regexp.MustCompile(parentServicePattern) - if !r.MatchString(service) { - return fmt.Sprintf("services/%s", service) - } else { - return service - } } diff --git a/mmv1/third_party/terraform/resources/resource_sql_database_instance.go.erb b/mmv1/third_party/terraform/resources/resource_sql_database_instance.go.erb index 334690f00e5d..4c9d6242751f 100644 --- a/mmv1/third_party/terraform/resources/resource_sql_database_instance.go.erb +++ b/mmv1/third_party/terraform/resources/resource_sql_database_instance.go.erb @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "github.com/hashicorp/terraform-provider-google/google/verify" @@ -2221,7 +2222,7 @@ func sqlDatabaseInstanceServiceNetworkPrecheck(d *schema.ResourceData, config *t log.Printf("[DEBUG] checking network %q for at least one service networking connection", network) // This call requires projects.get permissions, which may not have been granted to the Terraform actor, // particularly in shared VPC setups. Most will! But it's not strictly required. 
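// NOTE: a minimal sketch, not part of this change, of the tolerant handling the comment above
// describes — a 403 from the precheck (the actor lacks projects.get) is logged and skipped,
// while any other failure is surfaced; the skip-on-403 behavior shown here is an assumption:
//
//	_, err := servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent)
//	if err != nil {
//		var gerr *googleapi.Error
//		if errors.As(err, &gerr) && gerr.Code == 403 {
//			log.Printf("[DEBUG] skipping service networking precheck for %q: %v", network, err)
//			return nil // assumed: the precheck is advisory, so a permission error is non-fatal
//		}
//		return err
//	}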
- serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) + serviceNetworkingNetworkName, err := servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) if err != nil { var gerr *googleapi.Error if errors.As(err, &gerr) { diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go b/mmv1/third_party/terraform/services/cloudfunctions/data_source_google_cloudfunctions_function.go similarity index 92% rename from mmv1/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go rename to mmv1/third_party/terraform/services/cloudfunctions/data_source_google_cloudfunctions_function.go index e25802a139f6..04c0463dbf49 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_cloudfunctions_function.go +++ b/mmv1/third_party/terraform/services/cloudfunctions/data_source_google_cloudfunctions_function.go @@ -1,4 +1,4 @@ -package google +package cloudfunctions import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -35,13 +35,13 @@ func dataSourceGoogleCloudFunctionsFunctionRead(d *schema.ResourceData, meta int return err } - cloudFuncId := &cloudFunctionId{ + cloudFuncId := &CloudFunctionId{ Project: project, Region: region, Name: d.Get("name").(string), } - d.SetId(cloudFuncId.cloudFunctionId()) + d.SetId(cloudFuncId.CloudFunctionId()) err = resourceCloudFunctionsRead(d, meta) if err != nil { diff --git a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go new file mode 100644 index 000000000000..20d26544972a --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go @@ -0,0 +1,1177 @@ +package cloudfunctions + +import ( + "regexp" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/cloudfunctions/v1" + + "fmt" + "log" + "net/url" + "strconv" + "strings" + "time" +) + +var allowedIngressSettings = []string{ + "ALLOW_ALL", + "ALLOW_INTERNAL_AND_GCLB", + "ALLOW_INTERNAL_ONLY", +} + +var allowedVpcConnectorEgressSettings = []string{ + "ALL_TRAFFIC", + "PRIVATE_RANGES_ONLY", +} + +type CloudFunctionId struct { + Project string + Region string + Name string +} + +func (s *CloudFunctionId) CloudFunctionId() string { + return fmt.Sprintf("projects/%s/locations/%s/functions/%s", s.Project, s.Region, s.Name) +} + +// matches all international lower case letters, number, underscores and dashes. +var labelKeyRegex = regexp.MustCompile(`^[\p{Ll}0-9_-]+$`) + +func labelKeyValidator(val interface{}, key string) (warns []string, errs []error) { + if val == nil { + return + } + + m := val.(map[string]interface{}) + for k := range m { + if !labelKeyRegex.MatchString(k) { + errs = append(errs, fmt.Errorf("%q is an invalid label key. 
See https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements", k)) + } + } + return +} + +func (s *CloudFunctionId) locationId() string { + return fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Region) +} + +func parseCloudFunctionId(d *schema.ResourceData, config *transport_tpg.Config) (*CloudFunctionId, error) { + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/locations/(?P<region>[^/]+)/functions/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + "(?P<name>[^/]+)", + }, d, config); err != nil { + return nil, err + } + return &CloudFunctionId{ + Project: d.Get("project").(string), + Region: d.Get("region").(string), + Name: d.Get("name").(string), + }, nil +} + +// Differs from validateGCEName because Cloud Functions allow capital letters +// at start/end +func validateResourceCloudFunctionsFunctionName(v interface{}, k string) (ws []string, errors []error) { + re := `^(?:[a-zA-Z](?:[-_a-zA-Z0-9]{0,61}[a-zA-Z0-9])?)$` + return verify.ValidateRegexp(re)(v, k) +} + +func partsCompare(a, b, reg string) bool { + + regex := regexp.MustCompile(reg) + if regex.MatchString(a) && regex.MatchString(b) { + aParts := regex.FindStringSubmatch(a) + bParts := regex.FindStringSubmatch(b) + for i := 0; i < len(aParts); i++ { + if aParts[i] != bParts[i] { + return false + } + } + } else if regex.MatchString(a) { + aParts := regex.FindStringSubmatch(a) + if aParts[len(aParts)-1] != b { + return false + } + } else if regex.MatchString(b) { + bParts := regex.FindStringSubmatch(b) + if bParts[len(bParts)-1] != a { + return false + } + } else { + if a != b { + return false + } + } + + return true +} + +// based on CompareSelfLinkOrResourceName, but less reusable and allows multi-/ +// strings in the new state (config) part +func compareSelfLinkOrResourceNameWithMultipleParts(_, old, new string, _ *schema.ResourceData) bool { + // two formats based on expandEventTrigger() + regex1 := "projects/(.+)/databases/\\(default\\)/documents/(.+)" + regex2 := "projects/(.+)/(.+)/(.+)" + return partsCompare(old, new, regex1) || partsCompare(old, new, regex2) +} + +func ResourceCloudFunctionsFunction() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudFunctionsCreate, + Read: resourceCloudFunctionsRead, + Update: resourceCloudFunctionsUpdate, + Delete: resourceCloudFunctionsDestroy, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A user-defined name of the function. 
Function names must be unique globally.`, + ValidateFunc: validateResourceCloudFunctionsFunctionName, + }, + + "build_worker_pool": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the Cloud Build Custom Worker Pool that should be used to build the function.`, + }, + + "source_archive_bucket": { + Type: schema.TypeString, + Optional: true, + Description: `The GCS bucket containing the zip archive which contains the function.`, + }, + + "source_archive_object": { + Type: schema.TypeString, + Optional: true, + Description: `The source archive object (file) in the archive bucket.`, + }, + + "source_repository": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Represents parameters related to the source repository where a function is hosted. Cannot be set alongside source_archive_bucket or source_archive_object.`, + ConflictsWith: []string{"source_archive_bucket", "source_archive_object"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: `The URL pointing to the hosted repository where the function is defined.`, + }, + "deployed_url": { + Type: schema.TypeString, + Computed: true, + Description: `The URL pointing to the hosted repository where the function was defined at the time of deployment.`, + }, + }, + }, + }, + + "docker_registry": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Docker Registry to use for storing the function's Docker images. Allowed values are CONTAINER_REGISTRY (default) and ARTIFACT_REGISTRY.`, + }, + + "docker_repository": { + Type: schema.TypeString, + Optional: true, + Description: `User managed repository created in Artifact Registry optionally with a customer managed encryption key. If specified, deployments will use Artifact Registry for storing images built with Cloud Build.`, + }, + + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `Resource name of a KMS crypto key (managed by the user) used to encrypt/decrypt function resources.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: `Description of the function.`, + }, + + "available_memory_mb": { + Type: schema.TypeInt, + Optional: true, + Default: 256, + Description: `Memory (in MB) available to the function. Default value is 256. Possible values include 128, 256, 512, 1024, etc.`, + }, + + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + ValidateFunc: validation.IntBetween(1, 540), + Description: `Timeout (in seconds) for the function. Default value is 60 seconds. Cannot be more than 540 seconds.`, + }, + + "entry_point": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the function that will be executed when the Google Cloud Function is triggered.`, + }, + + "ingress_settings": { + Type: schema.TypeString, + Optional: true, + Default: "ALLOW_ALL", + ValidateFunc: validation.StringInSlice(allowedIngressSettings, true), + Description: `String value that controls what traffic can reach the function. Allowed values are ALLOW_ALL, ALLOW_INTERNAL_AND_GCLB and ALLOW_INTERNAL_ONLY. Changes to this field will recreate the cloud function.`, + }, + + "vpc_connector_egress_settings": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(allowedVpcConnectorEgressSettings, true), + Description: `The egress settings for the connector, controlling what traffic is diverted through it. 
Allowed values are ALL_TRAFFIC and PRIVATE_RANGES_ONLY. Defaults to PRIVATE_RANGES_ONLY. If unset, this field preserves the previously set value.`, + }, + + "labels": { + Type: schema.TypeMap, + ValidateFunc: labelKeyValidator, + Optional: true, + Description: `A set of key/value label pairs to assign to the function. Label keys must follow the requirements at https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements.`, + }, + + "runtime": { + Type: schema.TypeString, + Required: true, + Description: `The runtime in which the function is going to run. E.g. "nodejs8", "nodejs10", "python37", "go111".`, + }, + + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `If provided, the self-provided service account to run the function with.`, + }, + + "vpc_connector": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The VPC Network Connector that this cloud function can connect to. It can be either the fully-qualified URI, or the short name of the network connector resource. The format of this field is projects/*/locations/*/connectors/*.`, + }, + + "environment_variables": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value environment variable pairs to assign to the function.`, + }, + + "build_environment_variables": { + Type: schema.TypeMap, + Optional: true, + Description: `A set of key/value environment variable pairs available during build time.`, + }, + + "trigger_http": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Boolean variable. Any HTTP request (of a supported type) to the endpoint will trigger function execution. Supported HTTP request types are: POST, PUT, GET, DELETE, and OPTIONS. Endpoint is returned as https_trigger_url. Cannot be used with event_trigger.`, + }, + + "event_trigger": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ConflictsWith: []string{"trigger_http"}, + MaxItems: 1, + Description: `A source that fires events in response to a condition in another service. Cannot be used with trigger_http.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event_type": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + Description: `The type of event to observe. For example: "google.storage.object.finalize". See the documentation on calling Cloud Functions for a full reference of accepted triggers.`, + }, + "resource": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceNameWithMultipleParts, + Description: `The name or partial URI of the resource from which to observe events. For example, "myBucket" or "projects/my-project/topics/my-topic".`, + }, + "failure_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Specifies policy for failed executions.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retry": { + Type: schema.TypeBool, + // not strictly required, but this way an empty block can't be specified + Required: true, + Description: `Whether the function should be retried on failure. Defaults to false.`, + }, + }}, + }, + }, + }, + }, + + "https_trigger_url": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `URL which triggers function execution. 
Returned only if trigger_http is used.`, + }, + + "https_trigger_security_level": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The security level for the function. Defaults to SECURE_OPTIONAL. Valid only if trigger_http is used.`, + }, + + "max_instances": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The limit on the maximum number of function instances that may coexist at a given time.`, + }, + + "min_instances": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The limit on the minimum number of function instances that may coexist at a given time.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Project of the function. If it is not provided, the provider project is used.`, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Region of the function. If it is not provided, the provider region is used.`, + }, + + "secret_environment_variables": { + Type: schema.TypeList, + Optional: true, + Description: `Secret environment variables configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `Name of the environment variable.`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. If not set, it will be populated with the function's project, assuming that the secret exists in the same project as that of the function.`, + }, + "secret": { + Type: schema.TypeString, + Required: true, + Description: `ID of the secret in secret manager (not the full resource name).`, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `Version of the secret (version number or the string "latest"). It is recommended to use a numeric version for secret environment variables as any updates to the secret value are not reflected until new clones start.`, + }, + }, + }, + }, + + "secret_volumes": { + Type: schema.TypeList, + Optional: true, + Description: `Secret volumes configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_path": { + Type: schema.TypeString, + Required: true, + Description: `The path within the container to mount the secret volume. For example, setting the mount_path as "/etc/secrets" would mount the secret value files under the "/etc/secrets" directory. This directory will also be completely shadowed and unavailable to mount any other secrets. Recommended mount paths: "/etc/secrets" Restricted mount paths: "/cloudsql", "/dev/log", "/pod", "/proc", "/var/log".`, + }, + "project_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Project identifier (due to a known limitation, only project number is supported by this field) of the project that contains the secret. 
If not set, it will be populated with the function's project, assuming that the secret exists in the same project as that of the function.`, + }, + "secret": { + Type: schema.TypeString, + Required: true, + Description: `ID of the secret in secret manager (not the full resource name).`, + }, + "versions": { + Type: schema.TypeList, + Optional: true, + Description: `List of secret versions to mount for this secret. If empty, the "latest" version of the secret will be made available in a file named after the secret under the mount point.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + Description: `Relative path of the file under the mount path where the secret value for this version will be fetched and made available. For example, setting the mount_path as "/etc/secrets" and path as "/secret_foo" would mount the secret value file at "/etc/secrets/secret_foo".`, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `Version of the secret (version number or the string "latest"). It is preferable to use "latest" version with secret volumes as secret value changes are reflected immediately.`, + }, + }, + }, + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `Describes the current stage of a deployment.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + cloudFuncId := &CloudFunctionId{ + Project: project, + Region: region, + Name: d.Get("name").(string), + } + + function := &cloudfunctions.CloudFunction{ + Name: cloudFuncId.CloudFunctionId(), + Runtime: d.Get("runtime").(string), + ServiceAccountEmail: d.Get("service_account_email").(string), + ForceSendFields: []string{}, + } + + sourceRepos := d.Get("source_repository").([]interface{}) + if len(sourceRepos) > 0 { + function.SourceRepository = expandSourceRepository(sourceRepos) + } else { + sourceArchiveBucket := d.Get("source_archive_bucket").(string) + sourceArchiveObj := d.Get("source_archive_object").(string) + if sourceArchiveBucket == "" || sourceArchiveObj == "" { + return fmt.Errorf("either source_repository or both of source_archive_bucket+source_archive_object must be set") + } + function.SourceArchiveUrl = fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) + } + + secretEnv := d.Get("secret_environment_variables").([]interface{}) + if len(secretEnv) > 0 { + function.SecretEnvironmentVariables = expandSecretEnvironmentVariables(secretEnv) + } + + secretVolume := d.Get("secret_volumes").([]interface{}) + if len(secretVolume) > 0 { + function.SecretVolumes = expandSecretVolumes(secretVolume) + } + + if v, ok := d.GetOk("available_memory_mb"); ok { + availableMemoryMb := v.(int) + function.AvailableMemoryMb = int64(availableMemoryMb) + } + + if v, ok := d.GetOk("description"); ok { + function.Description = v.(string) + } + + if v, ok := d.GetOk("build_worker_pool"); ok { + function.BuildWorkerPool = v.(string) + } + + if v, ok := d.GetOk("entry_point"); ok { + function.EntryPoint = v.(string) + } + + if v, ok := d.GetOk("timeout"); ok { + function.Timeout = fmt.Sprintf("%vs", 
v.(int)) + } + + if v, ok := d.GetOk("event_trigger"); ok { + function.EventTrigger = expandEventTrigger(v.([]interface{}), project) + } else if v, ok := d.GetOk("trigger_http"); ok && v.(bool) { + function.HttpsTrigger = &cloudfunctions.HttpsTrigger{} + function.HttpsTrigger.SecurityLevel = d.Get("https_trigger_security_level").(string) + } else { + return fmt.Errorf("One of `event_trigger` or `trigger_http` is required: " + + "You must specify a trigger when deploying a new function.") + } + + if v, ok := d.GetOk("ingress_settings"); ok { + function.IngressSettings = v.(string) + } + + if _, ok := d.GetOk("labels"); ok { + function.Labels = tpgresource.ExpandLabels(d) + } + + if _, ok := d.GetOk("environment_variables"); ok { + function.EnvironmentVariables = tpgresource.ExpandEnvironmentVariables(d) + } + + if _, ok := d.GetOk("build_environment_variables"); ok { + function.BuildEnvironmentVariables = tpgresource.ExpandBuildEnvironmentVariables(d) + } + + if v, ok := d.GetOk("vpc_connector"); ok { + function.VpcConnector = v.(string) + } + + if v, ok := d.GetOk("vpc_connector_egress_settings"); ok { + function.VpcConnectorEgressSettings = v.(string) + } + + if v, ok := d.GetOk("docker_registry"); ok { + function.DockerRegistry = v.(string) + } + + if v, ok := d.GetOk("docker_repository"); ok { + function.DockerRepository = v.(string) + } + + if v, ok := d.GetOk("kms_key_name"); ok { + function.KmsKeyName = v.(string) + } + + if v, ok := d.GetOk("max_instances"); ok { + function.MaxInstances = int64(v.(int)) + } + + if v, ok := d.GetOk("min_instances"); ok { + function.MinInstances = int64(v.(int)) + } + + log.Printf("[DEBUG] Creating cloud function: %s", function.Name) + + // We retry the whole create-and-wait because Cloud Functions + // will sometimes fail a creation operation entirely if it fails to pull + // source code and we need to try the whole creation again. 
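// NOTE: a sketch of the predicate shape consumed by ErrorRetryPredicates below; the real
// matcher is IsCloudFunctionsSourceCodeError from the transport package, and the error text
// matched here is only an assumed example. Returning true re-runs the entire RetryFunc,
// i.e. the whole create-and-wait sequence:
//
//	var exampleSourcePredicate transport_tpg.RetryErrorPredicateFunc = func(err error) (bool, string) {
//		if err != nil && strings.Contains(err.Error(), "could not read function source code") { // assumed message
//			return true, "transient source fetch failure; retrying the whole create"
//		}
//		return false, ""
//	}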
+ rerr := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Create( + cloudFuncId.locationId(), function).Do() + if err != nil { + return err + } + + // Name of function should be unique + d.SetId(cloudFuncId.CloudFunctionId()) + + return CloudFunctionsOperationWait(config, op, "Creating CloudFunctions Function", userAgent, + d.Timeout(schema.TimeoutCreate)) + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{IsCloudFunctionsSourceCodeError}, + }) + if rerr != nil { + return rerr + } + log.Printf("[DEBUG] Finished creating cloud function: %s", function.Name) + return resourceCloudFunctionsRead(d, meta) +} + +func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + cloudFuncId, err := parseCloudFunctionId(d, config) + if err != nil { + return err + } + + function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) + } + + if err := d.Set("name", cloudFuncId.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("description", function.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("build_worker_pool", function.BuildWorkerPool); err != nil { + return fmt.Errorf("Error setting build_worker_pool: %s", err) + } + if err := d.Set("entry_point", function.EntryPoint); err != nil { + return fmt.Errorf("Error setting entry_point: %s", err) + } + if err := d.Set("available_memory_mb", function.AvailableMemoryMb); err != nil { + return fmt.Errorf("Error setting available_memory_mb: %s", err) + } + sRemoved := strings.Replace(function.Timeout, "s", "", -1) + timeout, err := strconv.Atoi(sRemoved) + if err != nil { + return err + } + if err := d.Set("timeout", timeout); err != nil { + return fmt.Errorf("Error setting timeout: %s", err) + } + if err := d.Set("ingress_settings", function.IngressSettings); err != nil { + return fmt.Errorf("Error setting ingress_settings: %s", err) + } + if err := d.Set("labels", function.Labels); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := d.Set("runtime", function.Runtime); err != nil { + return fmt.Errorf("Error setting runtime: %s", err) + } + if err := d.Set("service_account_email", function.ServiceAccountEmail); err != nil { + return fmt.Errorf("Error setting service_account_email: %s", err) + } + if err := d.Set("environment_variables", function.EnvironmentVariables); err != nil { + return fmt.Errorf("Error setting environment_variables: %s", err) + } + if err := d.Set("vpc_connector", function.VpcConnector); err != nil { + return fmt.Errorf("Error setting vpc_connector: %s", err) + } + if err := d.Set("vpc_connector_egress_settings", function.VpcConnectorEgressSettings); err != nil { + return fmt.Errorf("Error setting vpc_connector_egress_settings: %s", err) + } + if function.SourceArchiveUrl != "" { + // sourceArchiveUrl should always be a Google Cloud Storage URL (e.g. 
gs://bucket/object) + // https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions + sourceURL, err := url.Parse(function.SourceArchiveUrl) + if err != nil { + return err + } + bucket := sourceURL.Host + object := strings.TrimLeft(sourceURL.Path, "/") + if err := d.Set("source_archive_bucket", bucket); err != nil { + return fmt.Errorf("Error setting source_archive_bucket: %s", err) + } + if err := d.Set("source_archive_object", object); err != nil { + return fmt.Errorf("Error setting source_archive_object: %s", err) + } + } + if err := d.Set("source_repository", flattenSourceRepository(function.SourceRepository)); err != nil { + return fmt.Errorf("Error setting source_repository: %s", err) + } + + if err := d.Set("secret_environment_variables", flattenSecretEnvironmentVariables(function.SecretEnvironmentVariables)); err != nil { + return fmt.Errorf("Error setting secret_environment_variables: %s", err) + } + + if err := d.Set("secret_volumes", flattenSecretVolumes(function.SecretVolumes)); err != nil { + return fmt.Errorf("Error setting secret_volumes: %s", err) + } + + if err := d.Set("status", function.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + + if function.HttpsTrigger != nil { + if err := d.Set("trigger_http", true); err != nil { + return fmt.Errorf("Error setting trigger_http: %s", err) + } + if err := d.Set("https_trigger_url", function.HttpsTrigger.Url); err != nil { + return fmt.Errorf("Error setting https_trigger_url: %s", err) + } + if err := d.Set("https_trigger_security_level", function.HttpsTrigger.SecurityLevel); err != nil { + return fmt.Errorf("Error setting https_trigger_security_level: %s", err) + } + } + + if err := d.Set("event_trigger", flattenEventTrigger(function.EventTrigger)); err != nil { + return fmt.Errorf("Error setting event_trigger: %s", err) + } + if err := d.Set("docker_registry", function.DockerRegistry); err != nil { + return fmt.Errorf("Error setting docker_registry: %s", err) + } + if err := d.Set("docker_repository", function.DockerRepository); err != nil { + return fmt.Errorf("Error setting docker_repository: %s", err) + } + if err := d.Set("kms_key_name", function.KmsKeyName); err != nil { + return fmt.Errorf("Error setting kms_key_name: %s", err) + } + if err := d.Set("max_instances", function.MaxInstances); err != nil { + return fmt.Errorf("Error setting max_instances: %s", err) + } + if err := d.Set("min_instances", function.MinInstances); err != nil { + return fmt.Errorf("Error setting min_instances: %s", err) + } + if err := d.Set("region", cloudFuncId.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("project", cloudFuncId.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return nil +} + +func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_cloudfunctions_function") + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + cloudFuncId, err := parseCloudFunctionId(d, config) + if err != nil { + return err + } + + // The full function needs to be supplied in the PATCH call to evaluate some Organization Policies. 
https://github.com/hashicorp/terraform-provider-google/issues/6603 + function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) + } + + // The full function may contain a reference to manually uploaded code if the function was imported from gcloud + // This does not work with Terraform, so zero it out from the function if it exists. See https://github.com/hashicorp/terraform-provider-google/issues/7921 + function.SourceUploadUrl = "" + + d.Partial(true) + + var updateMaskArr []string + if d.HasChange("available_memory_mb") { + availableMemoryMb := d.Get("available_memory_mb").(int) + function.AvailableMemoryMb = int64(availableMemoryMb) + updateMaskArr = append(updateMaskArr, "availableMemoryMb") + } + + if d.HasChange("source_archive_bucket") || d.HasChange("source_archive_object") { + sourceArchiveBucket := d.Get("source_archive_bucket").(string) + sourceArchiveObj := d.Get("source_archive_object").(string) + function.SourceArchiveUrl = fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) + updateMaskArr = append(updateMaskArr, "sourceArchiveUrl") + } + + if d.HasChange("source_repository") { + function.SourceRepository = expandSourceRepository(d.Get("source_repository").([]interface{})) + updateMaskArr = append(updateMaskArr, "sourceRepository") + } + + if d.HasChange("secret_environment_variables") { + function.SecretEnvironmentVariables = expandSecretEnvironmentVariables(d.Get("secret_environment_variables").([]interface{})) + updateMaskArr = append(updateMaskArr, "secretEnvironmentVariables") + } + + if d.HasChange("secret_volumes") { + function.SecretVolumes = expandSecretVolumes(d.Get("secret_volumes").([]interface{})) + updateMaskArr = append(updateMaskArr, "secretVolumes") + } + + if d.HasChange("description") { + function.Description = d.Get("description").(string) + updateMaskArr = append(updateMaskArr, "description") + } + + if d.HasChange("build_worker_pool") { + function.BuildWorkerPool = d.Get("build_worker_pool").(string) + updateMaskArr = append(updateMaskArr, "build_worker_pool") + } + + if d.HasChange("timeout") { + function.Timeout = fmt.Sprintf("%vs", d.Get("timeout").(int)) + updateMaskArr = append(updateMaskArr, "timeout") + } + + if d.HasChange("ingress_settings") { + function.IngressSettings = d.Get("ingress_settings").(string) + updateMaskArr = append(updateMaskArr, "ingressSettings") + } + + if d.HasChange("labels") { + function.Labels = tpgresource.ExpandLabels(d) + updateMaskArr = append(updateMaskArr, "labels") + } + + if d.HasChange("runtime") { + function.Runtime = d.Get("runtime").(string) + updateMaskArr = append(updateMaskArr, "runtime") + } + + if d.HasChange("environment_variables") { + function.EnvironmentVariables = tpgresource.ExpandEnvironmentVariables(d) + updateMaskArr = append(updateMaskArr, "environmentVariables") + } + + if d.HasChange("build_environment_variables") { + function.BuildEnvironmentVariables = tpgresource.ExpandBuildEnvironmentVariables(d) + updateMaskArr = append(updateMaskArr, "buildEnvironmentVariables") + } + + if d.HasChange("vpc_connector") { + function.VpcConnector = d.Get("vpc_connector").(string) + updateMaskArr = append(updateMaskArr, "vpcConnector") + } + + if d.HasChange("vpc_connector_egress_settings") { + function.VpcConnectorEgressSettings = d.Get("vpc_connector_egress_settings").(string) + 
updateMaskArr = append(updateMaskArr, "vpcConnectorEgressSettings") + } + + if d.HasChange("event_trigger") { + function.EventTrigger = expandEventTrigger(d.Get("event_trigger").([]interface{}), project) + updateMaskArr = append(updateMaskArr, "eventTrigger", "eventTrigger.failurePolicy.retry") + } + + if d.HasChange("https_trigger_security_level") { + if function.HttpsTrigger == nil { + function.HttpsTrigger = &cloudfunctions.HttpsTrigger{} + } + function.HttpsTrigger.SecurityLevel = d.Get("https_trigger_security_level").(string) + updateMaskArr = append(updateMaskArr, "httpsTrigger", "httpsTrigger.securityLevel") + } + + if d.HasChange("docker_registry") { + function.DockerRegistry = d.Get("docker_registry").(string) + updateMaskArr = append(updateMaskArr, "dockerRegistry") + } + + if d.HasChange("docker_repository") { + function.DockerRepository = d.Get("docker_repository").(string) + updateMaskArr = append(updateMaskArr, "dockerRepository") + } + + if d.HasChange("kms_key_name") { + function.KmsKeyName = d.Get("kms_key_name").(string) + updateMaskArr = append(updateMaskArr, "kmsKeyName") + } + + if d.HasChange("max_instances") { + function.MaxInstances = int64(d.Get("max_instances").(int)) + updateMaskArr = append(updateMaskArr, "maxInstances") + } + + if d.HasChange("min_instances") { + function.MinInstances = int64(d.Get("min_instances").(int)) + updateMaskArr = append(updateMaskArr, "minInstances") + } + + if len(updateMaskArr) > 0 { + log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function) + updateMask := strings.Join(updateMaskArr, ",") + rerr := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Patch(function.Name, function). + UpdateMask(updateMask).Do() + if err != nil { + return err + } + + return CloudFunctionsOperationWait(config, op, "Updating CloudFunctions Function", userAgent, + d.Timeout(schema.TimeoutUpdate)) + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if rerr != nil { + return fmt.Errorf("Error while updating cloudfunction configuration: %s", rerr) + } + } + d.Partial(false) + + return resourceCloudFunctionsRead(d, meta) +} + +func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + cloudFuncId, err := parseCloudFunctionId(d, config) + if err != nil { + return err + } + + op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Delete(cloudFuncId.CloudFunctionId()).Do() + if err != nil { + return err + } + err = CloudFunctionsOperationWait(config, op, "Deleting CloudFunctions Function", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +func expandEventTrigger(configured []interface{}, project string) *cloudfunctions.EventTrigger { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + eventType := data["event_type"].(string) + resource := data["resource"].(string) + + // if resource starts with "projects/", we can reasonably assume it's a + // partial URI. Otherwise, it's a shortname. Construct a partial URI based + // on the event type if so. 
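// Worked example (illustrative values): a bare resource name is expanded into a partial URI
// keyed off the event type, so
//
//	trigger := expandEventTrigger([]interface{}{map[string]interface{}{
//		"event_type":     "google.pubsub.topic.publish",
//		"resource":       "my-topic", // no "projects/" prefix, so treated as a shortname
//		"failure_policy": []interface{}{},
//	}}, "my-project")
//
// yields trigger.Resource == "projects/my-project/topics/my-topic".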
+ if !strings.HasPrefix(resource, "projects/") { + shape := "" + switch { + case strings.HasPrefix(eventType, "google.storage.object."): + shape = "projects/%s/buckets/%s" + case strings.HasPrefix(eventType, "google.pubsub.topic."): + shape = "projects/%s/topics/%s" + // Legacy style triggers + case strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"): + // Note that this is an uncommon way to refer to buckets; normally, + // you'd use the global URL of the bucket and not the project + // scoped one. + shape = "projects/%s/buckets/%s" + case strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"): + shape = "projects/%s/topics/%s" + case strings.HasPrefix(eventType, "providers/cloud.firestore/eventTypes/"): + // Firestore doesn't yet support multiple databases, so "(default)" is assumed. + // https://cloud.google.com/functions/docs/calling/cloud-firestore#deploying_your_function + shape = "projects/%s/databases/(default)/documents/%s" + } + + resource = fmt.Sprintf(shape, project, resource) + } + + return &cloudfunctions.EventTrigger{ + EventType: eventType, + Resource: resource, + FailurePolicy: expandFailurePolicy(data["failure_policy"].([]interface{})), + } +} + +func flattenEventTrigger(eventTrigger *cloudfunctions.EventTrigger) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + if eventTrigger == nil { + return result + } + + result = append(result, map[string]interface{}{ + "event_type": eventTrigger.EventType, + "resource": eventTrigger.Resource, + "failure_policy": flattenFailurePolicy(eventTrigger.FailurePolicy), + }) + + return result +} + +func expandFailurePolicy(configured []interface{}) *cloudfunctions.FailurePolicy { + if len(configured) == 0 || configured[0] == nil { + return &cloudfunctions.FailurePolicy{} + } + + if data := configured[0].(map[string]interface{}); data["retry"].(bool) { + return &cloudfunctions.FailurePolicy{ + Retry: &cloudfunctions.Retry{}, + } + } + + return nil +} + +func flattenFailurePolicy(failurePolicy *cloudfunctions.FailurePolicy) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + if failurePolicy == nil { + return nil + } + + result = append(result, map[string]interface{}{ + "retry": failurePolicy.Retry != nil, + }) + + return result +} + +func expandSourceRepository(configured []interface{}) *cloudfunctions.SourceRepository { + if len(configured) == 0 || configured[0] == nil { + return &cloudfunctions.SourceRepository{} + } + + data := configured[0].(map[string]interface{}) + return &cloudfunctions.SourceRepository{ + Url: data["url"].(string), + } +} + +func flattenSourceRepository(sourceRepo *cloudfunctions.SourceRepository) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + if sourceRepo == nil { + return nil + } + + result = append(result, map[string]interface{}{ + "url": sourceRepo.Url, + "deployed_url": sourceRepo.DeployedUrl, + }) + + return result +} + +func expandSecretEnvironmentVariables(configured []interface{}) []*cloudfunctions.SecretEnvVar { + if len(configured) == 0 { + return nil + } + result := make([]*cloudfunctions.SecretEnvVar, 0, len(configured)) + for _, e := range configured { + data := e.(map[string]interface{}) + result = append(result, &cloudfunctions.SecretEnvVar{ + Key: data["key"].(string), + ProjectId: data["project_id"].(string), + Secret: data["secret"].(string), + Version: data["version"].(string), + }) + } + return result +} + +func flattenSecretEnvironmentVariables(envVars 
[]*cloudfunctions.SecretEnvVar) []map[string]interface{} { + if envVars == nil { + return nil + } + var result []map[string]interface{} + + for _, envVar := range envVars { + if envVar != nil { + data := map[string]interface{}{ + "key": envVar.Key, + "project_id": envVar.ProjectId, + "secret": envVar.Secret, + "version": envVar.Version, + } + result = append(result, data) + } + } + return result +} + +func expandSecretVolumes(configured []interface{}) []*cloudfunctions.SecretVolume { + if len(configured) == 0 { + return nil + } + result := make([]*cloudfunctions.SecretVolume, 0, len(configured)) + for _, e := range configured { + data := e.(map[string]interface{}) + result = append(result, &cloudfunctions.SecretVolume{ + MountPath: data["mount_path"].(string), + ProjectId: data["project_id"].(string), + Secret: data["secret"].(string), + Versions: expandSecretVersion(data["versions"].([]interface{})), //TODO + }) + } + return result +} + +func flattenSecretVolumes(secretVolumes []*cloudfunctions.SecretVolume) []map[string]interface{} { + if secretVolumes == nil { + return nil + } + var result []map[string]interface{} + + for _, secretVolume := range secretVolumes { + if secretVolume != nil { + data := map[string]interface{}{ + "mount_path": secretVolume.MountPath, + "project_id": secretVolume.ProjectId, + "secret": secretVolume.Secret, + "versions": flattenSecretVersion(secretVolume.Versions), + } + result = append(result, data) + } + } + return result +} + +func expandSecretVersion(configured []interface{}) []*cloudfunctions.SecretVersion { + if len(configured) == 0 { + return nil + } + result := make([]*cloudfunctions.SecretVersion, 0, len(configured)) + for _, e := range configured { + data := e.(map[string]interface{}) + result = append(result, &cloudfunctions.SecretVersion{ + Path: data["path"].(string), + Version: data["version"].(string), + }) + } + return result +} + +func flattenSecretVersion(secretVersions []*cloudfunctions.SecretVersion) []map[string]interface{} { + if secretVersions == nil { + return nil + } + var result []map[string]interface{} + + for _, secretVersion := range secretVersions { + if secretVersion != nil { + data := map[string]interface{}{ + "path": secretVersion.Path, + "version": secretVersion.Version, + } + result = append(result, data) + } + } + return result +} diff --git a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_internal_test.go b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_internal_test.go new file mode 100644 index 000000000000..d28e1f41c2fe --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_internal_test.go @@ -0,0 +1,217 @@ +package cloudfunctions + +import ( + "testing" +) + +func TestCloudFunctionsFunction_nameValidator(t *testing.T) { + validNames := []string{ + "a", + "aA", + "a0", + "has-hyphen", + "has_underscore", + "hasUpperCase", + "allChars_-A0", + "StartsUpperCase", + "endsUpperCasE", + } + for _, tc := range validNames { + wrns, errs := validateResourceCloudFunctionsFunctionName(tc, "function.name") + if len(wrns) > 0 { + t.Errorf("Expected no validation warnings for test case %q, got: %+v", tc, wrns) + } + if len(errs) > 0 { + t.Errorf("Expected no validation errors for test name %q, got: %+v", tc, errs) + } + } + + invalidNames := []string{ + "0startsWithNumber", + "endsWith_", + "endsWith-", + "bad*Character", + "aCloudFunctionsFunctionNameThatIsSeventyFiveCharactersLongWhichIsMoreThan63", + } + for _, 
tc := range invalidNames { + _, errs := validateResourceCloudFunctionsFunctionName(tc, "function.name") + if len(errs) == 0 { + t.Errorf("Expected errors for invalid test name %q, got none", tc) + } + } +} + +func TestValidLabelKeys(t *testing.T) { + testCases := []struct { + labelKey string + valid bool + }{ + { + "test-label", true, + }, + { + "test_label", true, + }, + { + "MixedCase", false, + }, + { + "number-09-dash", true, + }, + { + "", false, + }, + { + "test-label", true, + }, + { + "mixed*symbol", false, + }, + { + "intérnätional", true, + }, + } + + for _, tc := range testCases { + labels := make(map[string]interface{}) + labels[tc.labelKey] = "test value" + + _, errs := labelKeyValidator(labels, "") + if tc.valid && len(errs) > 0 { + t.Errorf("Validation failure, key: '%s' should be valid but actual errors were %q", tc.labelKey, errs) + } + if !tc.valid && len(errs) < 1 { + t.Errorf("Validation failure, key: '%s' should fail but actual errors were %q", tc.labelKey, errs) + } + } +} + +func TestCompareSelfLinkOrResourceNameWithMultipleParts(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "projects to no projects doc": { + Old: "projects/myproject/databases/default/documents/resource", + New: "resource", + ExpectDiffSuppress: true, + }, + "no projects to projects doc": { + Old: "resource", + New: "projects/myproject/databases/default/documents/resource", + ExpectDiffSuppress: true, + }, + "projects to projects doc": { + Old: "projects/myproject/databases/default/documents/resource", + New: "projects/myproject/databases/default/documents/resource", + ExpectDiffSuppress: true, + }, + "multi messages doc": { + Old: "messages/{messageId}", + New: "projects/myproject/databases/(default)/documents/messages/{messageId}", + ExpectDiffSuppress: true, + }, + "multi messages 2 doc": { + Old: "projects/myproject/databases/(default)/documents/messages/{messageId}", + New: "messages/{messageId}", + ExpectDiffSuppress: true, + }, + "projects to no projects topics": { + Old: "projects/myproject/topics/resource", + New: "resource", + ExpectDiffSuppress: true, + }, + "no projects to projects topics": { + Old: "resource", + New: "projects/myproject/topics/resource", + ExpectDiffSuppress: true, + }, + "projects to projects topics": { + Old: "projects/myproject/topics/resource", + New: "projects/myproject/topics/resource", + ExpectDiffSuppress: true, + }, + + "unmatched projects to no projects doc": { + Old: "projects/myproject/databases/default/documents/resource", + New: "resourcex", + ExpectDiffSuppress: false, + }, + "unmatched no projects to projects doc": { + Old: "resourcex", + New: "projects/myproject/databases/default/documents/resource", + ExpectDiffSuppress: false, + }, + "unmatched projects to projects doc": { + Old: "projects/myproject/databases/default/documents/resource", + New: "projects/myproject/databases/default/documents/resourcex", + ExpectDiffSuppress: false, + }, + "unmatched projects to projects 2 doc": { + Old: "projects/myprojectx/databases/default/documents/resource", + New: "projects/myproject/databases/default/documents/resource", + ExpectDiffSuppress: false, + }, + "unmatched projects to empty doc": { + Old: "", + New: "projects/myproject/databases/default/documents/resource", + ExpectDiffSuppress: false, + }, + "unmatched empty to projects 2 doc": { + Old: "projects/myprojectx/databases/default/documents/resource", + New: "", + ExpectDiffSuppress: false, + }, + "unmatched default to default2 doc": { + Old: 
"projects/myproject/databases/default/documents/resource", + New: "projects/myproject/databases/default2/documents/resource", + ExpectDiffSuppress: false, + }, + "unmatched projects to no projects topics": { + Old: "projects/myproject/topics/resource", + New: "resourcex", + ExpectDiffSuppress: false, + }, + "unmatched no projects to projects topics": { + Old: "resourcex", + New: "projects/myproject/topics/resource", + ExpectDiffSuppress: false, + }, + "unmatched projects to projects topics": { + Old: "projects/myproject/topics/resource", + New: "projects/myproject/topics/resourcex", + ExpectDiffSuppress: false, + }, + "unmatched projects to projects 2 topics": { + Old: "projects/myprojectx/topics/resource", + New: "projects/myproject/topics/resource", + ExpectDiffSuppress: false, + }, + "unmatched projects to empty topics": { + Old: "projects/myproject/topics/resource", + New: "", + ExpectDiffSuppress: false, + }, + "unmatched empty to projects topics": { + Old: "", + New: "projects/myproject/topics/resource", + ExpectDiffSuppress: false, + }, + "unmatched resource to resource-partial": { + Old: "resource", + New: "resource-partial", + ExpectDiffSuppress: false, + }, + "unmatched resource-partial to projects": { + Old: "resource-partial", + New: "projects/myproject/topics/resource", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if compareSelfLinkOrResourceNameWithMultipleParts("resource", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} diff --git a/mmv1/third_party/terraform/data_sources/data_source_cloud_run_locations.go b/mmv1/third_party/terraform/services/cloudrun/data_source_cloud_run_locations.go similarity index 99% rename from mmv1/third_party/terraform/data_sources/data_source_cloud_run_locations.go rename to mmv1/third_party/terraform/services/cloudrun/data_source_cloud_run_locations.go index 0b3fbb4a086c..cf0be9d96f9d 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_cloud_run_locations.go +++ b/mmv1/third_party/terraform/services/cloudrun/data_source_cloud_run_locations.go @@ -1,4 +1,4 @@ -package google +package cloudrun import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_composer_environment.go b/mmv1/third_party/terraform/services/composer/data_source_google_composer_environment.go similarity index 98% rename from mmv1/third_party/terraform/data_sources/data_source_google_composer_environment.go rename to mmv1/third_party/terraform/services/composer/data_source_google_composer_environment.go index dfd4e0116270..f536428c6d3c 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_composer_environment.go +++ b/mmv1/third_party/terraform/services/composer/data_source_google_composer_environment.go @@ -1,4 +1,4 @@ -package google +package composer import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_composer_image_versions.go b/mmv1/third_party/terraform/services/composer/data_source_google_composer_image_versions.go similarity index 95% rename from mmv1/third_party/terraform/data_sources/data_source_google_composer_image_versions.go rename to mmv1/third_party/terraform/services/composer/data_source_google_composer_image_versions.go index 8766500db7c8..c6f180bd35b9 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_composer_image_versions.go +++ b/mmv1/third_party/terraform/services/composer/data_source_google_composer_image_versions.go 
@@ -1,4 +1,4 @@ -package google +package composer import ( "fmt" @@ -66,7 +66,7 @@ func dataSourceGoogleComposerImageVersionsRead(d *schema.ResourceData, meta inte return err } - versions, err := paginatedListRequest(project, url, userAgent, config, flattenGoogleComposerImageVersions) + versions, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenGoogleComposerImageVersions) if err != nil { return fmt.Errorf("Error listing Composer image versions: %s", err) } diff --git a/mmv1/third_party/terraform/resources/resource_composer_environment.go.erb b/mmv1/third_party/terraform/services/composer/resource_composer_environment.go.erb similarity index 99% rename from mmv1/third_party/terraform/resources/resource_composer_environment.go.erb rename to mmv1/third_party/terraform/services/composer/resource_composer_environment.go.erb index 811e4479c8df..8dd8710e7dc3 100644 --- a/mmv1/third_party/terraform/resources/resource_composer_environment.go.erb +++ b/mmv1/third_party/terraform/services/composer/resource_composer_environment.go.erb @@ -1,5 +1,5 @@ <% autogen_exception -%> -package google +package composer import ( "fmt" @@ -326,7 +326,7 @@ func ResourceComposerEnvironment() *schema.Resource { ForceNew: true, AtLeastOneOf: composerIpAllocationPolicyKeys, Description: `The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both.`, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.cluster_secondary_range_name"}, }, "services_ipv4_cidr_block": { @@ -335,7 +335,7 @@ func ResourceComposerEnvironment() *schema.Resource { ForceNew: true, AtLeastOneOf: composerIpAllocationPolicyKeys, Description: `The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both.`, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.services_secondary_range_name"}, }, }, @@ -879,7 +879,7 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) } env := &composer.Environment{ - Name: envName.resourceName(), + Name: envName.ResourceName(), Labels: tpgresource.ExpandLabels(d), Config: transformedConfig, } @@ -887,8 +887,8 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) // Some fields cannot be specified during create and must be updated post-creation. 
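// For reference, the exported name helpers used below format as follows (illustrative values):
//
//	n := ComposerEnvironmentName{Project: "my-project", Region: "us-central1", Environment: "my-env"}
//	n.ParentName()   // "projects/my-project/locations/us-central1"
//	n.ResourceName() // "projects/my-project/locations/us-central1/environments/my-env"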
updateOnlyEnv := getComposerEnvironmentPostCreateUpdateObj(env) - log.Printf("[DEBUG] Creating new Environment %q", envName.parentName()) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Create(envName.parentName(), env).Do() + log.Printf("[DEBUG] Creating new Environment %q", envName.ParentName()) + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Create(envName.ParentName(), env).Do() if err != nil { return err } @@ -939,7 +939,7 @@ func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) e return err } - res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() if err != nil { return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComposerEnvironment %q", d.Id())) } @@ -1227,7 +1227,7 @@ func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *co } op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments. - Patch(envName.resourceName(), env). + Patch(envName.ResourceName(), env). UpdateMask(updateMask).Do() if err != nil { return err @@ -1258,7 +1258,7 @@ func resourceComposerEnvironmentDelete(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.resourceName()).Do() + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.ResourceName()).Do() if err != nil { return err } @@ -2230,7 +2230,7 @@ func validateComposerEnvironmentEnvVariables(v interface{}, k string) (ws []stri return ws, errors } -func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvironmentName, d *schema.ResourceData, config *transport_tpg.Config) error { +func handleComposerEnvironmentCreationOpFailure(id string, envName *ComposerEnvironmentName, d *schema.ResourceData, config *transport_tpg.Config) error { userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -2238,11 +2238,11 @@ func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvi log.Printf("[WARNING] Creation operation for Composer Environment %q failed, check Environment isn't still running", id) // Try to get possible created but invalid environment. - env, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() + env, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() if err != nil { // If error is 401, we don't have to clean up environment, return nil. // Otherwise, we encountered another error. 
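// NOTE: the helper below keys off a 404 rather than a 401 — HandleNotFoundError treats a 404
// as "the environment was never created", clearing the resource from state and returning nil,
// and passes any other error through wrapped. Illustrative shape, assumed from its use here:
//
//	if err := transport_tpg.HandleNotFoundError(getErr, d, "Composer Environment"); err != nil {
//		return err // non-404: surface the failure
//	}
//	// 404: state already cleared, nothing to clean up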
- return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Composer Environment %q", envName.resourceName())) + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Composer Environment %q", envName.ResourceName())) } if env.State == "CREATING" { @@ -2253,7 +2253,7 @@ func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvi } log.Printf("[WARNING] Environment %q from failed creation operation was created, deleting.", id) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.resourceName()).Do() + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.ResourceName()).Do() if err != nil { return fmt.Errorf("Could not delete the invalid created environment with state %q: %s", env.State, err) } @@ -2289,7 +2289,7 @@ func getComposerEnvironmentPostCreateUpdateObj(env *composer.Environment) (updat return updateEnv } -func resourceComposerEnvironmentName(d *schema.ResourceData, config *transport_tpg.Config) (*composerEnvironmentName, error) { +func resourceComposerEnvironmentName(d *schema.ResourceData, config *transport_tpg.Config) (*ComposerEnvironmentName, error) { project, err := tpgresource.GetProject(d, config) if err != nil { return nil, err @@ -2300,24 +2300,24 @@ func resourceComposerEnvironmentName(d *schema.ResourceData, config *transport_t return nil, err } - return &composerEnvironmentName{ + return &ComposerEnvironmentName{ Project: project, Region: region, Environment: d.Get("name").(string), }, nil } -type composerEnvironmentName struct { +type ComposerEnvironmentName struct { Project string Region string Environment string } -func (n *composerEnvironmentName) resourceName() string { +func (n *ComposerEnvironmentName) ResourceName() string { return fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) } -func (n *composerEnvironmentName) parentName() string { +func (n *ComposerEnvironmentName) ParentName() string { return fmt.Sprintf("projects/%s/locations/%s", n.Project, n.Region) } diff --git a/mmv1/third_party/terraform/services/composer/resource_composer_environment_internal_test.go b/mmv1/third_party/terraform/services/composer/resource_composer_environment_internal_test.go new file mode 100644 index 000000000000..a20f3368bb5c --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/resource_composer_environment_internal_test.go @@ -0,0 +1,35 @@ +package composer + +import ( + "testing" +) + +func TestComposerImageVersionDiffSuppress(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + old string + new string + expected bool + }{ + {"matches", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1.10.0", true}, + {"preview matches", "composer-1.17.0-preview.0-airflow-2.0.1", "composer-1.17.0-preview.0-airflow-2.0.1", true}, + {"old latest", "composer-latest-airflow-1.10.0", "composer-1.4.1-airflow-1.10.0", true}, + {"new latest", "composer-1.4.1-airflow-1.10.0", "composer-latest-airflow-1.10.0", true}, + {"composer major alias equivalent", "composer-1.4.0-airflow-1.10.0", "composer-1-airflow-1.10", true}, + {"composer major alias different", "composer-1.4.0-airflow-2.1.4", "composer-2-airflow-2.2", false}, + {"composer different", "composer-1.4.0-airflow-1.10.0", "composer-1.4.1-airflow-1.10.0", false}, + {"airflow major alias equivalent", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1", true}, + {"airflow major alias different", "composer-1.4.0-airflow-1.10.0", 
"composer-1.4.0-airflow-2", false}, + {"airflow major.minor alias equivalent", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1.10", true}, + {"airflow major.minor alias different", "composer-1.4.0-airflow-2.1.4", "composer-1.4.0-airflow-2.2", false}, + {"airflow different", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1.9.0", false}, + } + + for _, tc := range cases { + if actual := composerImageVersionDiffSuppress("", tc.old, tc.new, nil); actual != tc.expected { + t.Errorf("'%s' failed, expected %v but got %v", tc.name, tc.expected, actual) + } + } +} diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_container_cluster.go b/mmv1/third_party/terraform/services/container/data_source_google_container_cluster.go similarity index 98% rename from mmv1/third_party/terraform/data_sources/data_source_google_container_cluster.go rename to mmv1/third_party/terraform/services/container/data_source_google_container_cluster.go index 86bbf0d7144c..d40ff3240c70 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_container_cluster.go +++ b/mmv1/third_party/terraform/services/container/data_source_google_container_cluster.go @@ -1,4 +1,4 @@ -package google +package container import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_container_engine_versions.go b/mmv1/third_party/terraform/services/container/data_source_google_container_engine_versions.go similarity index 99% rename from mmv1/third_party/terraform/data_sources/data_source_google_container_engine_versions.go rename to mmv1/third_party/terraform/services/container/data_source_google_container_engine_versions.go index 3474ddbdccdb..8f4006428741 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_container_engine_versions.go +++ b/mmv1/third_party/terraform/services/container/data_source_google_container_engine_versions.go @@ -1,4 +1,4 @@ -package google +package container import ( "fmt" diff --git a/mmv1/third_party/terraform/utils/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb similarity index 99% rename from mmv1/third_party/terraform/utils/node_config.go.erb rename to mmv1/third_party/terraform/services/container/node_config.go.erb index a78d15b68416..f4f0da9b05ed 100644 --- a/mmv1/third_party/terraform/utils/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -1,5 +1,5 @@ <% autogen_exception -%> -package google +package container import ( <% unless version == "ga" -%> diff --git a/mmv1/third_party/terraform/resources/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb similarity index 99% rename from mmv1/third_party/terraform/resources/resource_container_cluster.go.erb rename to mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 241391b1049e..011c406b48a8 100644 --- a/mmv1/third_party/terraform/resources/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -1,5 +1,5 @@ <% autogen_exception -%> -package google +package container import ( "context" @@ -1447,7 +1447,7 @@ func ResourceContainerCluster() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, Description: `The IP address range for the cluster pod IPs. 
Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, }, @@ -1457,7 +1457,7 @@ func ResourceContainerCluster() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, Description: `The IP address range of the services IPs in this cluster. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, }, @@ -5695,11 +5695,6 @@ func extractNodePoolInformationFromCluster(d *schema.ResourceData, config *trans }, nil } -func cidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // If the user specified a size and the API returned a full cidr block, suppress. - return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) -} - // Suppress unremovable default scope values from GCP. // If the default service account would not otherwise have it, the `monitoring.write` scope // is added to a GKE cluster's scopes regardless of what the user provided. @@ -5719,7 +5714,7 @@ func containerClusterAddedScopesSuppress(k, old, new string, d *schema.ResourceD } // combine what the default scopes are with what was passed - m := golangSetFromStringSlice(append(addedScopes, tpgresource.ConvertStringArr(n.([]interface{}))...)) + m := tpgresource.GolangSetFromStringSlice(append(addedScopes, tpgresource.ConvertStringArr(n.([]interface{}))...)) combined := tpgresource.StringSliceFromGolangSet(m) // compare if the combined new scopes and default scopes differ from the old scopes diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_internal_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_internal_test.go.erb new file mode 100644 index 000000000000..5d2f180027e0 --- /dev/null +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_internal_test.go.erb @@ -0,0 +1,93 @@ +<% autogen_exception -%> +package container + +import ( +<% unless version == 'ga' -%> + "testing" + + container "google.golang.org/api/container/v1beta1" +<% end -%> +) + +<% unless version == 'ga' -%> +func TestValidateNodePoolAutoConfig(t *testing.T) { + withTags := &container.NodePoolAutoConfig{ + NetworkTags: &container.NetworkTags{ + Tags: []string{"not-empty"}, + }, + } + noTags := &container.NodePoolAutoConfig{} + + cases := map[string]struct { + Input *container.Cluster + ExpectError bool + }{ + "with tags, nap nil, autopilot nil": { + Input: &container.Cluster{NodePoolAutoConfig: withTags}, + ExpectError: true, + }, + "with tags, autopilot disabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: false}, + NodePoolAutoConfig: withTags, + }, + ExpectError: true, + }, + "with tags, nap disabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: false}, + NodePoolAutoConfig: withTags, + }, + ExpectError: true, + }, + "with tags, autopilot enabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: true}, + NodePoolAutoConfig: 
withTags, + }, + ExpectError: false, + }, + "with tags, nap enabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: true}, + NodePoolAutoConfig: withTags, + }, + ExpectError: false, + }, + "no tags, autopilot enabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: true}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + "no tags, nap enabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: true}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + "no tags, autopilot disabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: false}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + "no tags, nap disabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: false}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + } + + for tn, tc := range cases { + if err := validateNodePoolAutoConfig(tc.Input); (err != nil) != tc.ExpectError { + t.Fatalf("bad: '%s', expected error: %t, received error: %t", tn, tc.ExpectError, (err != nil)) + } + } +} +<% end -%> diff --git a/mmv1/third_party/terraform/resources/resource_container_cluster_migrate.go b/mmv1/third_party/terraform/services/container/resource_container_cluster_migrate.go similarity index 98% rename from mmv1/third_party/terraform/resources/resource_container_cluster_migrate.go rename to mmv1/third_party/terraform/services/container/resource_container_cluster_migrate.go index 5b3d3336e1e4..2b088ce0321f 100644 --- a/mmv1/third_party/terraform/resources/resource_container_cluster_migrate.go +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migrate.go @@ -1,4 +1,4 @@ -package google +package container import ( "fmt" diff --git a/mmv1/third_party/terraform/tests/resource_container_cluster_migrate_test.go b/mmv1/third_party/terraform/services/container/resource_container_cluster_migrate_test.go similarity index 99% rename from mmv1/third_party/terraform/tests/resource_container_cluster_migrate_test.go rename to mmv1/third_party/terraform/services/container/resource_container_cluster_migrate_test.go index d3e2d43b3a81..bcaf02c133d9 100644 --- a/mmv1/third_party/terraform/tests/resource_container_cluster_migrate_test.go +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migrate_test.go @@ -1,4 +1,4 @@ -package google +package container import ( "testing" diff --git a/mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb similarity index 97% rename from mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb rename to mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index 874ed54e4787..4f473ee28778 100644 --- a/mmv1/third_party/terraform/resources/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -1,5 +1,5 @@ <% autogen_exception -%> -package google +package container import ( "fmt" @@ -1199,7 +1199,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } 
log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id()) @@ -1237,7 +1237,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1291,7 +1291,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated tags for node pool %s", name) @@ -1328,7 +1328,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Call update serially. - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1366,7 +1366,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } // Call update serially. - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1398,7 +1398,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) @@ -1432,7 +1432,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) @@ -1465,7 +1465,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1498,7 +1498,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } @@ -1529,7 +1529,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node nodePoolInfo.location, "updating GKE node pool size", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] GKE node pool %s size has been updated to %d", name, newSize) @@ -1564,7 +1564,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node nodePoolInfo.location, "updating GKE node pool management", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := 
tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated management in Node Pool %s", name) @@ -1591,7 +1591,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool version", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated version in Node Pool %s", name) @@ -1616,7 +1616,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated node locations in Node Pool %s", name) @@ -1696,7 +1696,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node // Wait until it's updated return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool upgrade settings", userAgent, timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } log.Printf("[INFO] Updated upgrade settings in Node Pool %s", name) @@ -1727,7 +1727,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node timeout) } - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + if err := tpgresource.RetryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { return err } diff --git a/mmv1/third_party/terraform/resources/resource_container_node_pool_migrate.go b/mmv1/third_party/terraform/services/container/resource_container_node_pool_migrate.go similarity index 98% rename from mmv1/third_party/terraform/resources/resource_container_node_pool_migrate.go rename to mmv1/third_party/terraform/services/container/resource_container_node_pool_migrate.go index d5e81eab66f0..4a76a2f6ba36 100644 --- a/mmv1/third_party/terraform/resources/resource_container_node_pool_migrate.go +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_migrate.go @@ -1,4 +1,4 @@ -package google +package container import ( "fmt" diff --git a/mmv1/third_party/terraform/tests/resource_container_node_pool_migrate_test.go b/mmv1/third_party/terraform/services/container/resource_container_node_pool_migrate_test.go similarity index 98% rename from mmv1/third_party/terraform/tests/resource_container_node_pool_migrate_test.go rename to mmv1/third_party/terraform/services/container/resource_container_node_pool_migrate_test.go index 28e0e899109b..689b4fe87e28 100644 --- a/mmv1/third_party/terraform/tests/resource_container_node_pool_migrate_test.go +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_migrate_test.go @@ -1,4 +1,4 @@ -package google +package container import ( "testing" diff --git a/mmv1/third_party/terraform/utils/state_util.go b/mmv1/third_party/terraform/services/container/state_util.go similarity index 97% rename from 
mmv1/third_party/terraform/utils/state_util.go rename to mmv1/third_party/terraform/services/container/state_util.go index bb79c126c547..1003578813e4 100644 --- a/mmv1/third_party/terraform/utils/state_util.go +++ b/mmv1/third_party/terraform/services/container/state_util.go @@ -1,4 +1,4 @@ -package google +package container // A StateType represents the specific type of resting state that a state value // is. diff --git a/mmv1/third_party/terraform/data_sources/data_source_container_registry_image.go b/mmv1/third_party/terraform/services/containeranalysis/data_source_container_registry_image.go similarity index 98% rename from mmv1/third_party/terraform/data_sources/data_source_container_registry_image.go rename to mmv1/third_party/terraform/services/containeranalysis/data_source_container_registry_image.go index 644852053c19..2aac44a0d7c1 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_container_registry_image.go +++ b/mmv1/third_party/terraform/services/containeranalysis/data_source_container_registry_image.go @@ -1,4 +1,4 @@ -package google +package containeranalysis import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_container_registry_repository.go b/mmv1/third_party/terraform/services/containeranalysis/data_source_container_registry_repository.go similarity index 98% rename from mmv1/third_party/terraform/data_sources/data_source_container_registry_repository.go rename to mmv1/third_party/terraform/services/containeranalysis/data_source_container_registry_repository.go index 3d1dc6aab5cc..561cb6588fe2 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_container_registry_repository.go +++ b/mmv1/third_party/terraform/services/containeranalysis/data_source_container_registry_repository.go @@ -1,4 +1,4 @@ -package google +package containeranalysis import ( "fmt" diff --git a/mmv1/third_party/terraform/resources/resource_container_registry.go b/mmv1/third_party/terraform/services/containeranalysis/resource_container_registry.go similarity index 99% rename from mmv1/third_party/terraform/resources/resource_container_registry.go rename to mmv1/third_party/terraform/services/containeranalysis/resource_container_registry.go index 14ee617dab6a..f19e1389bd0b 100644 --- a/mmv1/third_party/terraform/resources/resource_container_registry.go +++ b/mmv1/third_party/terraform/services/containeranalysis/resource_container_registry.go @@ -1,4 +1,4 @@ -package google +package containeranalysis import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_container_attached_install_manifest.go b/mmv1/third_party/terraform/services/containerattached/data_source_google_container_attached_install_manifest.go similarity index 98% rename from mmv1/third_party/terraform/data_sources/data_source_google_container_attached_install_manifest.go rename to mmv1/third_party/terraform/services/containerattached/data_source_google_container_attached_install_manifest.go index 16e27737892d..a6dd6310fca4 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_container_attached_install_manifest.go +++ b/mmv1/third_party/terraform/services/containerattached/data_source_google_container_attached_install_manifest.go @@ -1,4 +1,4 @@ -package google +package containerattached import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_container_attached_versions.go b/mmv1/third_party/terraform/services/containerattached/data_source_google_container_attached_versions.go similarity index 98% rename from 
mmv1/third_party/terraform/data_sources/data_source_google_container_attached_versions.go rename to mmv1/third_party/terraform/services/containerattached/data_source_google_container_attached_versions.go index 2f279755fe81..40c69fd4a5c1 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_container_attached_versions.go +++ b/mmv1/third_party/terraform/services/containerattached/data_source_google_container_attached_versions.go @@ -1,4 +1,4 @@ -package google +package containerattached import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_container_aws_versions.go b/mmv1/third_party/terraform/services/containeraws/data_source_google_container_aws_versions.go similarity index 99% rename from mmv1/third_party/terraform/data_sources/data_source_google_container_aws_versions.go rename to mmv1/third_party/terraform/services/containeraws/data_source_google_container_aws_versions.go index a967d3480d0f..c48d29d2003f 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_container_aws_versions.go +++ b/mmv1/third_party/terraform/services/containeraws/data_source_google_container_aws_versions.go @@ -1,4 +1,4 @@ -package google +package containeraws import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_container_azure_versions.go b/mmv1/third_party/terraform/services/containerazure/data_source_google_container_azure_versions.go similarity index 98% rename from mmv1/third_party/terraform/data_sources/data_source_google_container_azure_versions.go rename to mmv1/third_party/terraform/services/containerazure/data_source_google_container_azure_versions.go index 3ff13f207454..f3f0e24cbceb 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_container_azure_versions.go +++ b/mmv1/third_party/terraform/services/containerazure/data_source_google_container_azure_versions.go @@ -1,4 +1,4 @@ -package google +package containerazure import ( "fmt" diff --git a/mmv1/third_party/terraform/data_sources/data_source_google_service_networking_peered_dns_domain.go b/mmv1/third_party/terraform/services/servicenetworking/data_source_google_service_networking_peered_dns_domain.go similarity index 96% rename from mmv1/third_party/terraform/data_sources/data_source_google_service_networking_peered_dns_domain.go rename to mmv1/third_party/terraform/services/servicenetworking/data_source_google_service_networking_peered_dns_domain.go index f08e5e50a451..f67bffc8fda2 100644 --- a/mmv1/third_party/terraform/data_sources/data_source_google_service_networking_peered_dns_domain.go +++ b/mmv1/third_party/terraform/services/servicenetworking/data_source_google_service_networking_peered_dns_domain.go @@ -1,4 +1,4 @@ -package google +package servicenetworking import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/mmv1/third_party/terraform/resources/resource_google_service_networking_peered_dns_domain.go b/mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go similarity index 99% rename from mmv1/third_party/terraform/resources/resource_google_service_networking_peered_dns_domain.go rename to mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go index 0272ea9dc6d6..108d5d343588 100644 --- a/mmv1/third_party/terraform/resources/resource_google_service_networking_peered_dns_domain.go +++ 
b/mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go @@ -1,4 +1,4 @@ -package google +package servicenetworking import ( "fmt" diff --git a/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go b/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go new file mode 100644 index 000000000000..ac1ff4a07912 --- /dev/null +++ b/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go @@ -0,0 +1,422 @@ +package servicenetworking + +import ( + "fmt" + "log" + "net/url" + "regexp" + "strings" + "time" + + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/servicenetworking/v1" +) + +func ResourceServiceNetworkingConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceServiceNetworkingConnectionCreate, + Read: resourceServiceNetworkingConnectionRead, + Update: resourceServiceNetworkingConnectionUpdate, + Delete: resourceServiceNetworkingConnectionDelete, + Importer: &schema.ResourceImporter{ + State: resourceServiceNetworkingConnectionImportState, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Name of VPC network connected with service producers using VPC peering.`, + }, + // NOTE(craigatgoogle): This field is weird, it's required to make the Insert/List calls as a parameter + // named "parent", however it's also defined in the response as an output field called "peering", which + // uses "-" as a delimiter instead of ".". To alleviate user confusion I've opted to model the gcloud + // CLI's approach, calling the field "service" and accepting the same format as the CLI with the "." + // delimiter. + // See: https://cloud.google.com/vpc/docs/configure-private-services-access#creating-connection + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Provider peering service that is managing peering connectivity for a service provider organization. For Google services that support this functionality it is 'servicenetworking.googleapis.com'.`, + }, + "reserved_peering_ranges": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Named IP address range(s) of PEERING type reserved for this service provider. 
Note that invoking this method with a different range when connection is already established will not reallocate already provisioned service producer subnetworks.`, + }, + "peering": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + network := d.Get("network").(string) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) + if err != nil { + return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) + } + + connection := &servicenetworking.Connection{ + Network: serviceNetworkingNetworkName, + ReservedPeeringRanges: tpgresource.ConvertStringArr(d.Get("reserved_peering_ranges").([]interface{})), + } + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) + if err != nil { + return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) + } + project := networkFieldValue.Project + + parentService := formatParentService(d.Get("service").(string)) + // We use Patch instead of Create, because we're getting + // "Error waiting for Create Service Networking Connection: + // Error code 9, message: Cannot modify allocated ranges in + // CreateConnection. Please use UpdateConnection." + // if we're creating peerings to more than one VPC (like two + // CloudSQL instances within one project, peered with two + // clusters.) + // + // This is a workaround for: + // https://issuetracker.google.com/issues/131908322 + // + // The API docs don't specify that you can do connections/-, + // but that's what gcloud does, and it's easier than grabbing + // the connection name. 
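+ //
+ // For illustration, assuming the typical service name
+ // "servicenetworking.googleapis.com" (per the schema description above), the
+ // Patch call below is addressed to
+ // "services/servicenetworking.googleapis.com/connections/-" with update mask
+ // "reservedPeeringRanges" and Force(true), so a single code path covers both
+ // the initial range allocation and later additions of reserved ranges.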
+ + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + project = bp + } + + createCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) + if config.UserProjectOverride { + createCall.Header().Add("X-Goog-User-Project", project) + } + op, err := createCall.Do() + if err != nil { + return err + } + + if err := ServiceNetworkingOperationWaitTime(config, op, "Create Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { + return err + } + + connectionId := &connectionId{ + Network: network, + Service: d.Get("service").(string), + } + + d.SetId(connectionId.Id()) + return resourceServiceNetworkingConnectionRead(d, meta) +} + +func resourceServiceNetworkingConnectionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + connectionId, err := parseConnectionId(d.Id()) + if err != nil { + return errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) + } + + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, connectionId.Network, userAgent) + if err != nil { + return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) + } + + network := d.Get("network").(string) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) + if err != nil { + return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) + } + project := networkFieldValue.Project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + project = bp + } + + parentService := formatParentService(connectionId.Service) + readCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.List(parentService).Network(serviceNetworkingNetworkName) + if config.UserProjectOverride { + readCall.Header().Add("X-Goog-User-Project", project) + } + response, err := readCall.Do() + if err != nil { + return err + } + + var connection *servicenetworking.Connection + for _, c := range response.Connections { + if c.Network == serviceNetworkingNetworkName { + connection = c + break + } + } + + if connection == nil { + d.SetId("") + log.Printf("[WARNING] Failed to find Service Networking Connection, network: %s service: %s", connectionId.Network, connectionId.Service) + return nil + } + + if err := d.Set("network", connectionId.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("service", connectionId.Service); err != nil { + return fmt.Errorf("Error setting service: %s", err) + } + if err := d.Set("peering", connection.Peering); err != nil { + return fmt.Errorf("Error setting peering: %s", err) + } + if err := d.Set("reserved_peering_ranges", connection.ReservedPeeringRanges); err != nil { + return fmt.Errorf("Error setting reserved_peering_ranges: %s", err) + } + return nil +} + +func resourceServiceNetworkingConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + connectionId, err := parseConnectionId(d.Id()) + if err 
!= nil { + return errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) + } + + parentService := formatParentService(connectionId.Service) + + if d.HasChange("reserved_peering_ranges") { + network := d.Get("network").(string) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) + if err != nil { + return errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) + } + + connection := &servicenetworking.Connection{ + Network: serviceNetworkingNetworkName, + ReservedPeeringRanges: tpgresource.ConvertStringArr(d.Get("reserved_peering_ranges").([]interface{})), + } + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) + if err != nil { + return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) + } + project := networkFieldValue.Project + + // The API docs don't specify that you can do connections/-, but that's what gcloud does, + // and it's easier than grabbing the connection name. + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + project = bp + } + + patchCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) + if config.UserProjectOverride { + patchCall.Header().Add("X-Goog-User-Project", project) + } + op, err := patchCall.Do() + if err != nil { + return err + } + if err := ServiceNetworkingOperationWaitTime(config, op, "Update Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + } + return resourceServiceNetworkingConnectionRead(d, meta) +} + +func resourceServiceNetworkingConnectionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + network := d.Get("network").(string) + serviceNetworkingNetworkName, err := RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + peering := d.Get("peering").(string) + obj["name"] = peering + url := fmt.Sprintf("%s%s/removePeering", config.ComputeBasePath, serviceNetworkingNetworkName) + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) + if err != nil { + return errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) + } + + project := networkFieldValue.Project + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ServiceNetworkingConnection %q", d.Id())) + } + + op := &compute.Operation{} + err = tpgresource.Convert(res, op) + if err != nil { + return err + } + + err = tpgcompute.ComputeOperationWaitTime( + config, op, project, "Updating Network", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + d.SetId("") + log.Printf("[INFO] Service network connection removed.") + + return nil +} + +func resourceServiceNetworkingConnectionImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + connectionId, err := 
parseConnectionId(d.Id()) + if err != nil { + return nil, err + } + + if err := d.Set("network", connectionId.Network); err != nil { + return nil, fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("service", connectionId.Service); err != nil { + return nil, fmt.Errorf("Error setting service: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +// NOTE(craigatgoogle): The Connection resource in this API doesn't have an Id field, so in order +// to support the Read method, we create an Id using the tuple(Network, Service). +type connectionId struct { + Network string + Service string +} + +func (id *connectionId) Id() string { + return fmt.Sprintf("%s:%s", url.QueryEscape(id.Network), url.QueryEscape(id.Service)) +} + +func parseConnectionId(id string) (*connectionId, error) { + res := strings.Split(id, ":") + + if len(res) != 2 { + return nil, fmt.Errorf("Failed to parse service networking connection id, value: %s", id) + } + + network, err := url.QueryUnescape(res[0]) + if err != nil { + return nil, errwrap.Wrapf("Failed to parse service networking connection id, invalid network, err: {{err}}", err) + } else if len(network) == 0 { + return nil, fmt.Errorf("Failed to parse service networking connection id, empty network") + } + + service, err := url.QueryUnescape(res[1]) + if err != nil { + return nil, errwrap.Wrapf("Failed to parse service networking connection id, invalid service, err: {{err}}", err) + } else if len(service) == 0 { + return nil, fmt.Errorf("Failed to parse service networking connection id, empty service") + } + + return &connectionId{ + Network: network, + Service: service, + }, nil +} + +// NOTE(craigatgoogle): An out of band aspect of this API is that it uses a unique formatting of network +// different from the standard self_link URI. It requires a call to the resource manager to get the project +// number for the current project. +func RetrieveServiceNetworkingNetworkName(d *schema.ResourceData, config *transport_tpg.Config, network, userAgent string) (string, error) { + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(network, d, config) + if err != nil { + return "", errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) + } + + pid := networkFieldValue.Project + if pid == "" { + return "", fmt.Errorf("Could not determine project") + } + log.Printf("[DEBUG] Retrieving project number by doing a GET with the project id, as required by service networking") + // err == nil indicates that the billing_project value was found + billingProject := pid + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getProjectCall := config.NewResourceManagerClient(userAgent).Projects.Get(pid) + if config.UserProjectOverride { + getProjectCall.Header().Add("X-Goog-User-Project", billingProject) + } + project, err := getProjectCall.Do() + if err != nil { + // note: returning a wrapped error is part of this method's contract!
+ // https://blog.golang.org/go1.13-errors + return "", fmt.Errorf("Failed to retrieve project, pid: %s, err: %w", pid, err) + } + + networkName := networkFieldValue.Name + if networkName == "" { + return "", fmt.Errorf("Failed to parse network") + } + + // return the network name formatting unique to this API + return fmt.Sprintf("projects/%v/global/networks/%v", project.ProjectNumber, networkName), nil + +} + +const parentServicePattern = "^services/.+$" + +// NOTE(craigatgoogle): An out of band aspect of this API is that it requires the service name to be +// formatted as "services/<serviceName>" +func formatParentService(service string) string { + r := regexp.MustCompile(parentServicePattern) + if !r.MatchString(service) { + return fmt.Sprintf("services/%s", service) + } else { + return service + } +} diff --git a/mmv1/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb b/mmv1/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb index 4d0a0d44895b..4f1e6525dcca 100644 --- a/mmv1/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb +++ b/mmv1/third_party/terraform/tests/resource_cloudfunctions_function_test.go.erb @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" + tpgcloudfunctions "github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions" "google.golang.org/api/cloudfunctions/v1" ) @@ -38,218 +39,6 @@ func init() { }) } -func TestCloudFunctionsFunction_nameValidator(t *testing.T) { - validNames := []string{ - "a", - "aA", - "a0", - "has-hyphen", - "has_underscore", - "hasUpperCase", - "allChars_-A0", - "StartsUpperCase", - "endsUpperCasE", - } - for _, tc := range validNames { - wrns, errs := validateResourceCloudFunctionsFunctionName(tc, "function.name") - if len(wrns) > 0 { - t.Errorf("Expected no validation warnings for test case %q, got: %+v", tc, wrns) - } - if len(errs) > 0 { - t.Errorf("Expected no validation errors for test name %q, got: %+v", tc, errs) - } - } - - invalidNames := []string{ - "0startsWithNumber", - "endsWith_", - "endsWith-", - "bad*Character", - "aCloudFunctionsFunctionNameThatIsSeventyFiveCharactersLongWhichIsMoreThan63", - } - for _, tc := range invalidNames { - _, errs := validateResourceCloudFunctionsFunctionName(tc, "function.name") - if len(errs) == 0 { - t.Errorf("Expected errors for invalid test name %q, got none", tc) - } - } -} - -func TestValidLabelKeys(t *testing.T) { - testCases := []struct { - labelKey string - valid bool - }{ - { - "test-label", true, - }, - { - "test_label", true, - }, - { - "MixedCase", false, - }, - { - "number-09-dash", true, - }, - { - "", false, - }, - { - "test-label", true, - }, - { - "mixed*symbol", false, - }, - { - "intérnätional", true, - }, - } - - for _, tc := range testCases { - labels := make(map[string]interface{}) - labels[tc.labelKey] = "test value" - - _, errs := labelKeyValidator(labels, "") - if tc.valid && len(errs) > 0 { - t.Errorf("Validation failure, key: '%s' should be valid but actual errors were %q", tc.labelKey, errs) - } - if !tc.valid && len(errs) < 1 { - t.Errorf("Validation failure, key: '%s' should fail but actual errors were %q", tc.labelKey, errs) - } - } -} - -func TestCompareSelfLinkOrResourceNameWithMultipleParts(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "projects to no projects doc": { - Old: 
"projects/myproject/databases/default/documents/resource", - New: "resource", - ExpectDiffSuppress: true, - }, - "no projects to projects doc": { - Old: "resource", - New: "projects/myproject/databases/default/documents/resource", - ExpectDiffSuppress: true, - }, - "projects to projects doc": { - Old: "projects/myproject/databases/default/documents/resource", - New: "projects/myproject/databases/default/documents/resource", - ExpectDiffSuppress: true, - }, - "multi messages doc": { - Old: "messages/{messageId}", - New: "projects/myproject/databases/(default)/documents/messages/{messageId}", - ExpectDiffSuppress: true, - }, - "multi messages 2 doc": { - Old: "projects/myproject/databases/(default)/documents/messages/{messageId}", - New: "messages/{messageId}", - ExpectDiffSuppress: true, - }, - "projects to no projects topics": { - Old: "projects/myproject/topics/resource", - New: "resource", - ExpectDiffSuppress: true, - }, - "no projects to projects topics": { - Old: "resource", - New: "projects/myproject/topics/resource", - ExpectDiffSuppress: true, - }, - "projects to projects topics": { - Old: "projects/myproject/topics/resource", - New: "projects/myproject/topics/resource", - ExpectDiffSuppress: true, - }, - - "unmatched projects to no projects doc": { - Old: "projects/myproject/databases/default/documents/resource", - New: "resourcex", - ExpectDiffSuppress: false, - }, - "unmatched no projects to projects doc": { - Old: "resourcex", - New: "projects/myproject/databases/default/documents/resource", - ExpectDiffSuppress: false, - }, - "unmatched projects to projects doc": { - Old: "projects/myproject/databases/default/documents/resource", - New: "projects/myproject/databases/default/documents/resourcex", - ExpectDiffSuppress: false, - }, - "unmatched projects to projects 2 doc": { - Old: "projects/myprojectx/databases/default/documents/resource", - New: "projects/myproject/databases/default/documents/resource", - ExpectDiffSuppress: false, - }, - "unmatched projects to empty doc": { - Old: "", - New: "projects/myproject/databases/default/documents/resource", - ExpectDiffSuppress: false, - }, - "unmatched empty to projects 2 doc": { - Old: "projects/myprojectx/databases/default/documents/resource", - New: "", - ExpectDiffSuppress: false, - }, - "unmatched default to default2 doc": { - Old: "projects/myproject/databases/default/documents/resource", - New: "projects/myproject/databases/default2/documents/resource", - ExpectDiffSuppress: false, - }, - "unmatched projects to no projects topics": { - Old: "projects/myproject/topics/resource", - New: "resourcex", - ExpectDiffSuppress: false, - }, - "unmatched no projects to projects topics": { - Old: "resourcex", - New: "projects/myproject/topics/resource", - ExpectDiffSuppress: false, - }, - "unmatched projects to projects topics": { - Old: "projects/myproject/topics/resource", - New: "projects/myproject/topics/resourcex", - ExpectDiffSuppress: false, - }, - "unmatched projects to projects 2 topics": { - Old: "projects/myprojectx/topics/resource", - New: "projects/myproject/topics/resource", - ExpectDiffSuppress: false, - }, - "unmatched projects to empty topics": { - Old: "projects/myproject/topics/resource", - New: "", - ExpectDiffSuppress: false, - }, - "unmatched empty to projects topics": { - Old: "", - New: "projects/myproject/topics/resource", - ExpectDiffSuppress: false, - }, - "unmatched resource to resource-partial": { - Old: "resource", - New: "resource-partial", - ExpectDiffSuppress: false, - }, - "unmatched resource-partial to 
projects": { - Old: "resource-partial", - New: "projects/myproject/topics/resource", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if compareSelfLinkOrResourceNameWithMultipleParts("resource", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func TestAccCloudFunctionsFunction_basic(t *testing.T) { t.Parallel() @@ -759,12 +548,12 @@ func testAccCheckCloudFunctionsFunctionDestroyProducer(t *testing.T) func(s *ter name := rs.Primary.Attributes["name"] project := rs.Primary.Attributes["project"] region := rs.Primary.Attributes["region"] - cloudFuncId := &cloudFunctionId{ + cloudFuncId := &tpgcloudfunctions.CloudFunctionId{ Project: project, Region: region, Name: name, } - _, err := config.NewCloudFunctionsClient(config.UserAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() + _, err := config.NewCloudFunctionsClient(config.UserAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() if err == nil { return fmt.Errorf("Function still exists") } @@ -789,12 +578,12 @@ func testAccCloudFunctionsFunctionExists(t *testing.T, n string, function *cloud name := rs.Primary.Attributes["name"] project := rs.Primary.Attributes["project"] region := rs.Primary.Attributes["region"] - cloudFuncId := &cloudFunctionId{ + cloudFuncId := &tpgcloudfunctions.CloudFunctionId{ Project: project, Region: region, Name: name, } - found, err := config.NewCloudFunctionsClient(config.UserAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() + found, err := config.NewCloudFunctionsClient(config.UserAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() if err != nil { return fmt.Errorf("CloudFunctions Function not present") } diff --git a/mmv1/third_party/terraform/tests/resource_composer_environment_test.go.erb b/mmv1/third_party/terraform/tests/resource_composer_environment_test.go.erb index a537c6672c95..5e2f5c8996d1 100644 --- a/mmv1/third_party/terraform/tests/resource_composer_environment_test.go.erb +++ b/mmv1/third_party/terraform/tests/resource_composer_environment_test.go.erb @@ -6,6 +6,7 @@ import ( "fmt" "github.com/hashicorp/terraform-provider-google/google/acctest" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/composer" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "testing" @@ -40,36 +41,6 @@ func allComposerServiceAgents() []string { } } -func TestComposerImageVersionDiffSuppress(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - old string - new string - expected bool - }{ - {"matches", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1.10.0", true}, - {"preview matches", "composer-1.17.0-preview.0-airflow-2.0.1", "composer-1.17.0-preview.0-airflow-2.0.1", true}, - {"old latest", "composer-latest-airflow-1.10.0", "composer-1.4.1-airflow-1.10.0", true}, - {"new latest", "composer-1.4.1-airflow-1.10.0", "composer-latest-airflow-1.10.0", true}, - {"composer major alias equivalent", "composer-1.4.0-airflow-1.10.0", "composer-1-airflow-1.10", true}, - {"composer major alias different", "composer-1.4.0-airflow-2.1.4", "composer-2-airflow-2.2", false}, - {"composer different", "composer-1.4.0-airflow-1.10.0", "composer-1.4.1-airflow-1.10.0", false}, - {"airflow major alias equivalent", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1", 
true}, - {"airflow major alias different", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-2", false}, - {"airflow major.minor alias equivalent", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1.10", true}, - {"airflow major.minor alias different", "composer-1.4.0-airflow-2.1.4", "composer-1.4.0-airflow-2.2", false}, - {"airflow different", "composer-1.4.0-airflow-1.10.0", "composer-1.4.0-airflow-1.9.0", false}, - } - - for _, tc := range cases { - if actual := composerImageVersionDiffSuppress("", tc.old, tc.new, nil); actual != tc.expected { - t.Errorf("'%s' failed, expected %v but got %v", tc.name, tc.expected, actual) - } - } -} - // Checks environment creation with minimum required information. func TestAccComposerEnvironment_basic(t *testing.T) { t.Parallel() @@ -1034,15 +1005,15 @@ func testAccComposerEnvironmentDestroyProducer(t *testing.T) func(s *terraform.S if len(idTokens) != 6 { return fmt.Errorf("Invalid ID %q, expected format projects/{project}/regions/{region}/environments/{environment}", rs.Primary.ID) } - envName := &composerEnvironmentName{ + envName := &composer.ComposerEnvironmentName{ Project: idTokens[1], Region: idTokens[3], Environment: idTokens[5], } - _, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() + _, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() if err == nil { - return fmt.Errorf("environment %s still exists", envName.resourceName()) + return fmt.Errorf("environment %s still exists", envName.ResourceName()) } } diff --git a/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb index 22ef861d6ac3..a099847baf11 100644 --- a/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/tests/resource_container_cluster_test.go.erb @@ -13,10 +13,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" - -<% unless version == 'ga' -%> - container "google.golang.org/api/container/v1beta1" -<% end -%> ) func init() { @@ -7577,87 +7573,3 @@ resource "google_container_cluster" "primary" { `, name) } <% end -%> - - -<% unless version == 'ga' -%> -func TestValidateNodePoolAutoConfig(t *testing.T) { - withTags := &container.NodePoolAutoConfig{ - NetworkTags: &container.NetworkTags{ - Tags: []string{"not-empty"}, - }, - } - noTags := &container.NodePoolAutoConfig{} - - cases := map[string]struct { - Input *container.Cluster - ExpectError bool - }{ - "with tags, nap nil, autopilot nil": { - Input: &container.Cluster{NodePoolAutoConfig: withTags}, - ExpectError: true, - }, - "with tags, autopilot disabled": { - Input: &container.Cluster{ - Autopilot: &container.Autopilot{Enabled: false}, - NodePoolAutoConfig: withTags, - }, - ExpectError: true, - }, - "with tags, nap disabled": { - Input: &container.Cluster{ - Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: false}, - NodePoolAutoConfig: withTags, - }, - ExpectError: true, - }, - "with tags, autopilot enabled": { - Input: &container.Cluster{ - Autopilot: &container.Autopilot{Enabled: true}, - NodePoolAutoConfig: withTags, - }, - ExpectError: false, - }, - "with tags, nap enabled": { - Input: &container.Cluster{ - Autoscaling: 
&container.ClusterAutoscaling{EnableNodeAutoprovisioning: true}, - NodePoolAutoConfig: withTags, - }, - ExpectError: false, - }, - "no tags, autopilot enabled": { - Input: &container.Cluster{ - Autopilot: &container.Autopilot{Enabled: true}, - NodePoolAutoConfig: noTags, - }, - ExpectError: false, - }, - "no tags, nap enabled": { - Input: &container.Cluster{ - Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: true}, - NodePoolAutoConfig: noTags, - }, - ExpectError: false, - }, - "no tags, autopilot disabled": { - Input: &container.Cluster{ - Autopilot: &container.Autopilot{Enabled: false}, - NodePoolAutoConfig: noTags, - }, - ExpectError: false, - }, - "no tags, nap disabled": { - Input: &container.Cluster{ - Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: false}, - NodePoolAutoConfig: noTags, - }, - ExpectError: false, - }, - } - - for tn, tc := range cases { - if err := validateNodePoolAutoConfig(tc.Input); (err != nil) != tc.ExpectError { - t.Fatalf("bad: '%s', expected error: %t, received error: %t", tn, tc.ExpectError, (err != nil)) - } - } -} -<% end -%> \ No newline at end of file diff --git a/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb b/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb index cf435835a5a8..b02b0204f400 100644 --- a/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb +++ b/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb @@ -268,4 +268,9 @@ func CompareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool { } return false +} + +func CidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If the user specified a size and the API returned a full cidr block, suppress. + return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) } \ No newline at end of file diff --git a/mmv1/third_party/terraform/tpgresource/utils.go b/mmv1/third_party/terraform/tpgresource/utils.go index c11ab999f401..0f56763af2eb 100644 --- a/mmv1/third_party/terraform/tpgresource/utils.go +++ b/mmv1/third_party/terraform/tpgresource/utils.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/errwrap" fwDiags "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "google.golang.org/api/googleapi" @@ -511,6 +512,22 @@ func CheckGoogleIamPolicy(value string) error { return nil } +// Retries an operation while the canonical error code is FAILED_PRECONDTION +// which indicates there is an incompatible operation already running on the +// cluster. This error can be safely retried until the incompatible operation +// completes, and the newly requested operation can begin. 
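+//
+// A minimal usage sketch; the lock key, the ResourceData value d, and
+// sendClusterUpdateRequest are illustrative placeholders, not symbols
+// defined in this change:
+//
+//	err := RetryWhileIncompatibleOperation(d.Timeout(schema.TimeoutUpdate), lockKey, func() error {
+//		// Any call that can fail with FAILED_PRECONDITION while another
+//		// operation holds the same lock.
+//		return sendClusterUpdateRequest()
+//	})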
+func RetryWhileIncompatibleOperation(timeout time.Duration, lockKey string, f func() error) error {
+	return resource.Retry(timeout, func() *resource.RetryError {
+		if err := transport_tpg.LockedCall(lockKey, f); err != nil {
+			if IsFailedPreconditionError(err) {
+				return resource.RetryableError(err)
+			}
+			return resource.NonRetryableError(err)
+		}
+		return nil
+	})
+}
+
 func FrameworkDiagsToSdkDiags(fwD fwDiags.Diagnostics) *diag.Diagnostics {
 	var diags diag.Diagnostics
 	for _, e := range fwD.Errors() {
diff --git a/mmv1/third_party/terraform/utils/common_diff_suppress.go.erb b/mmv1/third_party/terraform/utils/common_diff_suppress.go.erb
index 802000ea5d6f..45c7e3927f6d 100644
--- a/mmv1/third_party/terraform/utils/common_diff_suppress.go.erb
+++ b/mmv1/third_party/terraform/utils/common_diff_suppress.go.erb
@@ -180,3 +180,7 @@ func ProjectNumberDiffSuppress(k, old, new string, d *schema.ResourceData) bool
 func compareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool {
 	return tpgresource.CompareCryptoKeyVersions("", old, new, nil)
 }
+
+func cidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
+	return tpgresource.CidrOrSizeDiffSuppress(k, old, new, d)
+}
diff --git a/mmv1/third_party/terraform/utils/provider.go.erb b/mmv1/third_party/terraform/utils/provider.go.erb
index 54a099aaaebf..7682bd8f07cc 100644
--- a/mmv1/third_party/terraform/utils/provider.go.erb
+++ b/mmv1/third_party/terraform/utils/provider.go.erb
@@ -30,6 +30,11 @@ import (
 <% end -%>
 <% end -%>
+	"github.com/hashicorp/terraform-provider-google/google/services/composer"
+	"github.com/hashicorp/terraform-provider-google/google/services/container"
+	"github.com/hashicorp/terraform-provider-google/google/services/containeraws"
+	"github.com/hashicorp/terraform-provider-google/google/services/containerazure"
+	"github.com/hashicorp/terraform-provider-google/google/services/servicenetworking"
 	"github.com/hashicorp/terraform-provider-google/google/tpgiamresource"
 	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
 	"github.com/hashicorp/terraform-provider-google/google/verify"
@@ -214,17 +219,17 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) {
 		"google_billing_account":                  billing.DataSourceGoogleBillingAccount(),
 		"google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(),
 		"google_cloudbuild_trigger":               cloudbuild.DataSourceGoogleCloudBuildTrigger(),
-		"google_cloudfunctions_function":          DataSourceGoogleCloudFunctionsFunction(),
+		"google_cloudfunctions_function":          cloudfunctions.DataSourceGoogleCloudFunctionsFunction(),
 		"google_cloudfunctions2_function":         cloudfunctions2.DataSourceGoogleCloudFunctions2Function(),
 <% unless version == 'ga' -%>
 		"google_cloud_asset_resources_search_all": DataSourceGoogleCloudAssetResourcesSearchAll(),
 <% end -%>
 		"google_cloud_identity_groups":            cloudidentity.DataSourceGoogleCloudIdentityGroups(),
 		"google_cloud_identity_group_memberships": cloudidentity.DataSourceGoogleCloudIdentityGroupMemberships(),
-		"google_cloud_run_locations":              DataSourceGoogleCloudRunLocations(),
+		"google_cloud_run_locations":              cloudrun.DataSourceGoogleCloudRunLocations(),
 		"google_cloud_run_service":                cloudrun.DataSourceGoogleCloudRunService(),
-		"google_composer_environment":             DataSourceGoogleComposerEnvironment(),
-		"google_composer_image_versions":          DataSourceGoogleComposerImageVersions(),
+		"google_composer_environment":             composer.DataSourceGoogleComposerEnvironment(),
+		"google_composer_image_versions":          composer.DataSourceGoogleComposerImageVersions(),
 		"google_compute_address":                  compute.DataSourceGoogleComputeAddress(),
 		"google_compute_addresses":                compute.DataSourceGoogleComputeAddresses(),
 		"google_compute_backend_service":          compute.DataSourceGoogleComputeBackendService(),
@@ -264,14 +269,14 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) {
 		"google_compute_subnetwork":                   compute.DataSourceGoogleComputeSubnetwork(),
 		"google_compute_vpn_gateway":                  compute.DataSourceGoogleComputeVpnGateway(),
 		"google_compute_zones":                        compute.DataSourceGoogleComputeZones(),
-		"google_container_azure_versions":             DataSourceGoogleContainerAzureVersions(),
-		"google_container_aws_versions":               DataSourceGoogleContainerAwsVersions(),
-		"google_container_attached_versions":          DataSourceGoogleContainerAttachedVersions(),
-		"google_container_attached_install_manifest":  DataSourceGoogleContainerAttachedInstallManifest(),
-		"google_container_cluster":                    DataSourceGoogleContainerCluster(),
-		"google_container_engine_versions":            DataSourceGoogleContainerEngineVersions(),
-		"google_container_registry_image":             DataSourceGoogleContainerImage(),
-		"google_container_registry_repository":        DataSourceGoogleContainerRepo(),
+		"google_container_azure_versions":             containerazure.DataSourceGoogleContainerAzureVersions(),
+		"google_container_aws_versions":               containeraws.DataSourceGoogleContainerAwsVersions(),
+		"google_container_attached_versions":          containerattached.DataSourceGoogleContainerAttachedVersions(),
+		"google_container_attached_install_manifest":  containerattached.DataSourceGoogleContainerAttachedInstallManifest(),
+		"google_container_cluster":                    container.DataSourceGoogleContainerCluster(),
+		"google_container_engine_versions":            container.DataSourceGoogleContainerEngineVersions(),
+		"google_container_registry_image":             containeranalysis.DataSourceGoogleContainerImage(),
+		"google_container_registry_repository":        containeranalysis.DataSourceGoogleContainerRepo(),
 		"google_dataproc_metastore_service":           dataprocmetastore.DataSourceDataprocMetastoreService(),
 		"google_datastream_static_ips":                DataSourceGoogleDatastreamStaticIps(),
 		"google_game_services_game_server_deployment_rollout": gameservices.DataSourceGameServicesGameServerDeploymentRollout(),
@@ -336,7 +341,7 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) {
 		"google_sql_database":                          sql.DataSourceSqlDatabase(),
 		"google_sql_database_instance":                 DataSourceSqlDatabaseInstance(),
 		"google_sql_database_instances":                DataSourceSqlDatabaseInstances(),
-		"google_service_networking_peered_dns_domain":  DataSourceGoogleServiceNetworkingPeeredDNSDomain(),
+		"google_service_networking_peered_dns_domain":  servicenetworking.DataSourceGoogleServiceNetworkingPeeredDNSDomain(),
 		"google_storage_bucket":                        DataSourceGoogleStorageBucket(),
 		"google_storage_bucket_object":                 DataSourceGoogleStorageBucketObject(),
 		"google_storage_bucket_object_content":         DataSourceGoogleStorageBucketObjectContent(),
@@ -477,8 +482,8 @@ end # products.each do
 		"google_bigtable_instance":          bigtable.ResourceBigtableInstance(),
 		"google_bigtable_table":             bigtable.ResourceBigtableTable(),
 		"google_billing_subaccount":         resourcemanager.ResourceBillingSubaccount(),
-		"google_cloudfunctions_function":    ResourceCloudFunctionsFunction(),
-		"google_composer_environment":       ResourceComposerEnvironment(),
+		"google_cloudfunctions_function":    cloudfunctions.ResourceCloudFunctionsFunction(),
+		"google_composer_environment":       composer.ResourceComposerEnvironment(),
 		"google_compute_attached_disk":      compute.ResourceComputeAttachedDisk(),
 		"google_compute_instance":           compute.ResourceComputeInstance(),
 <% unless version == 'ga' -%>
@@ -502,9 +507,9 @@ end # products.each do
 		"google_compute_shared_vpc_host_project":    compute.ResourceComputeSharedVpcHostProject(),
 		"google_compute_shared_vpc_service_project": compute.ResourceComputeSharedVpcServiceProject(),
 		"google_compute_target_pool":                compute.ResourceComputeTargetPool(),
-		"google_container_cluster":                  ResourceContainerCluster(),
-		"google_container_node_pool":                ResourceContainerNodePool(),
-		"google_container_registry":                 ResourceContainerRegistry(),
+		"google_container_cluster":                  container.ResourceContainerCluster(),
+		"google_container_node_pool":                container.ResourceContainerNodePool(),
+		"google_container_registry":                 containeranalysis.ResourceContainerRegistry(),
 		"google_dataflow_job":                       ResourceDataflowJob(),
 <% unless version == 'ga' -%>
 		"google_dataflow_flex_template_job":         ResourceDataflowFlexTemplateJob(),
@@ -533,7 +538,7 @@ end # products.each do
 <% unless version == 'ga' -%>
 		"google_project_service_identity":      resourcemanager.ResourceProjectServiceIdentity(),
 <% end -%>
-		"google_service_networking_connection": ResourceServiceNetworkingConnection(),
+		"google_service_networking_connection": servicenetworking.ResourceServiceNetworkingConnection(),
 		"google_sql_database_instance":          ResourceSqlDatabaseInstance(),
 		"google_sql_ssl_cert":                   ResourceSqlSslCert(),
 		"google_sql_user":                       ResourceSqlUser(),
@@ -551,7 +556,7 @@ end # products.each do
 <% end -%>
 		"google_service_account":                      resourcemanager.ResourceGoogleServiceAccount(),
 		"google_service_account_key":                  resourcemanager.ResourceGoogleServiceAccountKey(),
-		"google_service_networking_peered_dns_domain": ResourceGoogleServiceNetworkingPeeredDNSDomain(),
+		"google_service_networking_peered_dns_domain": servicenetworking.ResourceGoogleServiceNetworkingPeeredDNSDomain(),
 		"google_storage_bucket":                       ResourceStorageBucket(),
 		"google_storage_bucket_acl":                   ResourceStorageBucketAcl(),
 		"google_storage_bucket_object":                ResourceStorageBucketObject(),
diff --git a/mmv1/third_party/terraform/utils/utils.go b/mmv1/third_party/terraform/utils/utils.go
index 03fb89db9f8a..3f27c9ab1f0b 100644
--- a/mmv1/third_party/terraform/utils/utils.go
+++ b/mmv1/third_party/terraform/utils/utils.go
@@ -12,7 +12,6 @@ import (
 	fwDiags "github.com/hashicorp/terraform-plugin-framework/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
 	"google.golang.org/api/googleapi"
@@ -330,15 +329,7 @@ func checkGoogleIamPolicy(value string) error {
 // cluster. This error can be safely retried until the incompatible operation
 // completes, and the newly requested operation can begin.
 func retryWhileIncompatibleOperation(timeout time.Duration, lockKey string, f func() error) error {
-	return resource.Retry(timeout, func() *resource.RetryError {
-		if err := transport_tpg.LockedCall(lockKey, f); err != nil {
-			if isFailedPreconditionError(err) {
-				return resource.RetryableError(err)
-			}
-			return resource.NonRetryableError(err)
-		}
-		return nil
-	})
+	return tpgresource.RetryWhileIncompatibleOperation(timeout, lockKey, f)
 }
 
 // Deprecated: For backward compatibility frameworkDiagsToSdkDiags is still working,
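
A minimal table-driven sketch of how the new tpgresource.CidrOrSizeDiffSuppress helper could be exercised, in the same style as the provider's other diff-suppress tests; it assumes a test file in a package that imports "testing" and tpgresource, and the test name and CIDR values are illustrative, not part of the diff:

func TestCidrOrSizeDiffSuppress(t *testing.T) {
	t.Parallel()

	cases := map[string]struct {
		Old, New           string
		ExpectDiffSuppress bool
	}{
		// Config gave only a size; the API expanded it to a full block.
		"size only in config, full block from API": {
			Old:                "10.0.0.0/24",
			New:                "/24",
			ExpectDiffSuppress: true,
		},
		// Identical full blocks produce no diff, so no suppression is needed.
		"full block in both": {
			Old:                "10.0.0.0/24",
			New:                "10.0.0.0/24",
			ExpectDiffSuppress: false,
		},
		// A changed size is a real diff and must not be suppressed.
		"size changed": {
			Old:                "10.0.0.0/24",
			New:                "/25",
			ExpectDiffSuppress: false,
		},
	}

	for tn, tc := range cases {
		if tpgresource.CidrOrSizeDiffSuppress("", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress {
			t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress)
		}
	}
}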