diff --git a/internal/common/client_options.go b/internal/common/client_options.go index 6eba6eeaedcc0..1b5c831b5d3b4 100644 --- a/internal/common/client_options.go +++ b/internal/common/client_options.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/go-azure-helpers/sender" "github.com/hashicorp/go-azure-sdk/sdk/auth" "github.com/hashicorp/go-azure-sdk/sdk/client" - "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" "github.com/hashicorp/go-azure-sdk/sdk/environments" "github.com/hashicorp/terraform-plugin-sdk/v2/meta" "github.com/hashicorp/terraform-provider-azurerm/internal/features" @@ -66,24 +65,20 @@ type ClientOptions struct { } // Configure set up a resourcemanager.Client using an auth.Authorizer from hashicorp/go-azure-sdk -func (o ClientOptions) Configure(c *resourcemanager.Client, authorizer auth.Authorizer) { - c.Authorizer = authorizer - c.UserAgent = userAgent(c.UserAgent, o.TerraformVersion, o.PartnerId, o.DisableTerraformPartnerID) +func (o ClientOptions) Configure(c client.BaseClient, authorizer auth.Authorizer) { + c.SetAuthorizer(authorizer) + c.SetUserAgent(userAgent(c.GetUserAgent(), o.TerraformVersion, o.PartnerId, o.DisableTerraformPartnerID)) - requestMiddlewares := make([]client.RequestMiddleware, 0) if !o.DisableCorrelationRequestID { id := o.CustomCorrelationRequestID if id == "" { id = correlationRequestID() } - requestMiddlewares = append(requestMiddlewares, correlationRequestIDMiddleware(id)) + c.AppendRequestMiddleware(correlationRequestIDMiddleware(id)) } - requestMiddlewares = append(requestMiddlewares, requestLoggerMiddleware("AzureRM")) - c.RequestMiddlewares = &requestMiddlewares - c.ResponseMiddlewares = &[]client.ResponseMiddleware{ - responseLoggerMiddleware("AzureRM"), - } + c.AppendRequestMiddleware(requestLoggerMiddleware("AzureRM")) + c.AppendResponseMiddleware(responseLoggerMiddleware("AzureRM")) } // ConfigureClient sets up an autorest.Client using an autorest.Authorizer diff --git a/internal/provider/helpers.go b/internal/provider/helpers.go new file mode 100644 index 0000000000000..eb1ce774a23d3 --- /dev/null +++ b/internal/provider/helpers.go @@ -0,0 +1,128 @@ +package provider + +import ( + "encoding/base64" + "fmt" + "os" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func decodeCertificate(clientCertificate string) ([]byte, error) { + var pfx []byte + if clientCertificate != "" { + out := make([]byte, base64.StdEncoding.DecodedLen(len(clientCertificate))) + n, err := base64.StdEncoding.Decode(out, []byte(clientCertificate)) + if err != nil { + return pfx, fmt.Errorf("could not decode client certificate data: %v", err) + } + pfx = out[:n] + } + return pfx, nil +} + +func getOidcToken(d *schema.ResourceData) (*string, error) { + idToken := strings.TrimSpace(d.Get("oidc_token").(string)) + + if path := d.Get("oidc_token_file_path").(string); path != "" { + fileTokenRaw, err := os.ReadFile(path) + + if err != nil { + return nil, fmt.Errorf("reading OIDC Token from file %q: %v", path, err) + } + + fileToken := strings.TrimSpace(string(fileTokenRaw)) + + if idToken != "" && idToken != fileToken { + return nil, fmt.Errorf("mismatch between supplied OIDC token and supplied OIDC token file contents - please either remove one or ensure they match") + } + + idToken = fileToken + } + + if d.Get("use_aks_workload_identity").(bool) && os.Getenv("AZURE_FEDERATED_TOKEN_FILE") != "" { + path := os.Getenv("AZURE_FEDERATED_TOKEN_FILE") + fileTokenRaw, err := 
os.ReadFile(os.Getenv("AZURE_FEDERATED_TOKEN_FILE")) + + if err != nil { + return nil, fmt.Errorf("reading OIDC Token from file %q provided by AKS Workload Identity: %v", path, err) + } + + fileToken := strings.TrimSpace(string(fileTokenRaw)) + + if idToken != "" && idToken != fileToken { + return nil, fmt.Errorf("mismatch between supplied OIDC token and OIDC token file contents provided by AKS Workload Identity - please either remove one, ensure they match, or disable use_aks_workload_identity") + } + + idToken = fileToken + } + + return &idToken, nil +} + +func getClientId(d *schema.ResourceData) (*string, error) { + clientId := strings.TrimSpace(d.Get("client_id").(string)) + + if path := d.Get("client_id_file_path").(string); path != "" { + fileClientIdRaw, err := os.ReadFile(path) + + if err != nil { + return nil, fmt.Errorf("reading Client ID from file %q: %v", path, err) + } + + fileClientId := strings.TrimSpace(string(fileClientIdRaw)) + + if clientId != "" && clientId != fileClientId { + return nil, fmt.Errorf("mismatch between supplied Client ID and supplied Client ID file contents - please either remove one or ensure they match") + } + + clientId = fileClientId + } + + if d.Get("use_aks_workload_identity").(bool) && os.Getenv("AZURE_CLIENT_ID") != "" { + aksClientId := os.Getenv("AZURE_CLIENT_ID") + if clientId != "" && clientId != aksClientId { + return nil, fmt.Errorf("mismatch between supplied Client ID and that provided by AKS Workload Identity - please remove, ensure they match, or disable use_aks_workload_identity") + } + clientId = aksClientId + } + + return &clientId, nil +} + +func getClientSecret(d *schema.ResourceData) (*string, error) { + clientSecret := strings.TrimSpace(d.Get("client_secret").(string)) + + if path := d.Get("client_secret_file_path").(string); path != "" { + fileSecretRaw, err := os.ReadFile(path) + + if err != nil { + return nil, fmt.Errorf("reading Client Secret from file %q: %v", path, err) + } + + fileSecret := strings.TrimSpace(string(fileSecretRaw)) + + if clientSecret != "" && clientSecret != fileSecret { + return nil, fmt.Errorf("mismatch between supplied Client Secret and supplied Client Secret file contents - please either remove one or ensure they match") + } + + clientSecret = fileSecret + } + + return &clientSecret, nil +} + +func getTenantId(d *schema.ResourceData) (*string, error) { + tenantId := strings.TrimSpace(d.Get("tenant_id").(string)) + + if d.Get("use_aks_workload_identity").(bool) && os.Getenv("AZURE_TENANT_ID") != "" { + aksTenantId := os.Getenv("AZURE_TENANT_ID") + if tenantId != "" && tenantId != aksTenantId { + return nil, fmt.Errorf("mismatch between supplied Tenant ID and that provided by AKS Workload Identity - please remove, ensure they match, or disable use_aks_workload_identity") + } + tenantId = aksTenantId + } + + return &tenantId, nil +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 5c2d5fc23fe8b..4c6fbac839488 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -5,7 +5,6 @@ package provider import ( "context" - "encoding/base64" "fmt" "log" "os" @@ -510,124 +509,6 @@ func buildClient(ctx context.Context, p *schema.Provider, d *schema.ResourceData return client, nil } -func decodeCertificate(clientCertificate string) ([]byte, error) { - var pfx []byte - if clientCertificate != "" { - out := make([]byte, base64.StdEncoding.DecodedLen(len(clientCertificate))) - n, err := base64.StdEncoding.Decode(out, []byte(clientCertificate)) - if err != nil { - 
return pfx, fmt.Errorf("could not decode client certificate data: %v", err) - } - pfx = out[:n] - } - return pfx, nil -} - -func getOidcToken(d *schema.ResourceData) (*string, error) { - idToken := strings.TrimSpace(d.Get("oidc_token").(string)) - - if path := d.Get("oidc_token_file_path").(string); path != "" { - fileTokenRaw, err := os.ReadFile(path) - - if err != nil { - return nil, fmt.Errorf("reading OIDC Token from file %q: %v", path, err) - } - - fileToken := strings.TrimSpace(string(fileTokenRaw)) - - if idToken != "" && idToken != fileToken { - return nil, fmt.Errorf("mismatch between supplied OIDC token and supplied OIDC token file contents - please either remove one or ensure they match") - } - - idToken = fileToken - } - - if d.Get("use_aks_workload_identity").(bool) && os.Getenv("AZURE_FEDERATED_TOKEN_FILE") != "" { - path := os.Getenv("AZURE_FEDERATED_TOKEN_FILE") - fileTokenRaw, err := os.ReadFile(os.Getenv("AZURE_FEDERATED_TOKEN_FILE")) - - if err != nil { - return nil, fmt.Errorf("reading OIDC Token from file %q provided by AKS Workload Identity: %v", path, err) - } - - fileToken := strings.TrimSpace(string(fileTokenRaw)) - - if idToken != "" && idToken != fileToken { - return nil, fmt.Errorf("mismatch between supplied OIDC token and OIDC token file contents provided by AKS Workload Identity - please either remove one, ensure they match, or disable use_aks_workload_identity") - } - - idToken = fileToken - } - - return &idToken, nil -} - -func getClientId(d *schema.ResourceData) (*string, error) { - clientId := strings.TrimSpace(d.Get("client_id").(string)) - - if path := d.Get("client_id_file_path").(string); path != "" { - fileClientIdRaw, err := os.ReadFile(path) - - if err != nil { - return nil, fmt.Errorf("reading Client ID from file %q: %v", path, err) - } - - fileClientId := strings.TrimSpace(string(fileClientIdRaw)) - - if clientId != "" && clientId != fileClientId { - return nil, fmt.Errorf("mismatch between supplied Client ID and supplied Client ID file contents - please either remove one or ensure they match") - } - - clientId = fileClientId - } - - if d.Get("use_aks_workload_identity").(bool) && os.Getenv("AZURE_CLIENT_ID") != "" { - aksClientId := os.Getenv("AZURE_CLIENT_ID") - if clientId != "" && clientId != aksClientId { - return nil, fmt.Errorf("mismatch between supplied Client ID and that provided by AKS Workload Identity - please remove, ensure they match, or disable use_aks_workload_identity") - } - clientId = aksClientId - } - - return &clientId, nil -} - -func getClientSecret(d *schema.ResourceData) (*string, error) { - clientSecret := strings.TrimSpace(d.Get("client_secret").(string)) - - if path := d.Get("client_secret_file_path").(string); path != "" { - fileSecretRaw, err := os.ReadFile(path) - - if err != nil { - return nil, fmt.Errorf("reading Client Secret from file %q: %v", path, err) - } - - fileSecret := strings.TrimSpace(string(fileSecretRaw)) - - if clientSecret != "" && clientSecret != fileSecret { - return nil, fmt.Errorf("mismatch between supplied Client Secret and supplied Client Secret file contents - please either remove one or ensure they match") - } - - clientSecret = fileSecret - } - - return &clientSecret, nil -} - -func getTenantId(d *schema.ResourceData) (*string, error) { - tenantId := strings.TrimSpace(d.Get("tenant_id").(string)) - - if d.Get("use_aks_workload_identity").(bool) && os.Getenv("AZURE_TENANT_ID") != "" { - aksTenantId := os.Getenv("AZURE_TENANT_ID") - if tenantId != "" && tenantId != aksTenantId { - return nil, 
fmt.Errorf("mismatch between supplied Tenant ID and that provided by AKS Workload Identity - please remove, ensure they match, or disable use_aks_workload_identity") - } - tenantId = aksTenantId - } - - return &tenantId, nil -} - const resourceProviderRegistrationErrorFmt = `Error ensuring Resource Providers are registered. Terraform automatically attempts to register the Resource Providers it supports to diff --git a/internal/services/arckubernetes/arc_kubernetes_flux_configuration_resource.go b/internal/services/arckubernetes/arc_kubernetes_flux_configuration_resource.go index ccdac918aa825..5ec30309b0e57 100644 --- a/internal/services/arckubernetes/arc_kubernetes_flux_configuration_resource.go +++ b/internal/services/arckubernetes/arc_kubernetes_flux_configuration_resource.go @@ -239,7 +239,7 @@ func (r ArcKubernetesFluxConfigurationResource) Arguments() map[string]*pluginsd "container_id": { Type: pluginsdk.TypeString, Required: true, - ValidateFunc: storageValidate.StorageContainerDataPlaneID, + ValidateFunc: storageValidate.StorageContainerDataPlaneIDForDomainSuffix(""), // TODO need to know the storage domain suffix at schema time! }, "account_key": { @@ -570,7 +570,7 @@ func (r ArcKubernetesFluxConfigurationResource) Create() sdk.ResourceFunc { properties.Properties.Bucket, properties.Properties.ConfigurationProtectedSettings = expandBucketDefinitionModel(model.Bucket) } else if _, exists = metadata.ResourceData.GetOk("blob_storage"); exists { properties.Properties.SourceKind = pointer.To(fluxconfiguration.SourceKindTypeAzureBlob) - azureBlob, err := expandArcAzureBlobDefinitionModel(model.BlobStorage) + azureBlob, err := expandArcAzureBlobDefinitionModel(model.BlobStorage, metadata.Client.Storage.StorageDomainSuffix) if err != nil { return fmt.Errorf("expanding `blob_storage`: %+v", err) } @@ -624,7 +624,7 @@ func (r ArcKubernetesFluxConfigurationResource) Update() sdk.ResourceFunc { properties.Properties.ConfigurationProtectedSettings = nil if metadata.ResourceData.HasChange("blob_storage") { - azureBlob, err := expandArcAzureBlobDefinitionModel(model.BlobStorage) + azureBlob, err := expandArcAzureBlobDefinitionModel(model.BlobStorage, metadata.Client.Storage.StorageDomainSuffix) if err != nil { return fmt.Errorf("expanding `blob_storage`: %+v", err) } @@ -717,7 +717,7 @@ func (r ArcKubernetesFluxConfigurationResource) Read() sdk.ResourceFunc { if model := resp.Model; model != nil { if properties := model.Properties; properties != nil { - blobStorage, err := flattenArcAzureBlobDefinitionModel(properties.AzureBlob, configModel.BlobStorage) + blobStorage, err := flattenArcAzureBlobDefinitionModel(properties.AzureBlob, configModel.BlobStorage, metadata.Client.Storage.StorageDomainSuffix) if err != nil { return fmt.Errorf("flattening `blob_storage`: %+v", err) } @@ -762,7 +762,7 @@ func (r ArcKubernetesFluxConfigurationResource) Delete() sdk.ResourceFunc { } } -func expandArcAzureBlobDefinitionModel(inputList []AzureBlobDefinitionModel) (*fluxconfiguration.AzureBlobDefinition, error) { +func expandArcAzureBlobDefinitionModel(inputList []AzureBlobDefinitionModel, storageDomainSuffix string) (*fluxconfiguration.AzureBlobDefinition, error) { if len(inputList) == 0 { return nil, nil } @@ -778,7 +778,7 @@ func expandArcAzureBlobDefinitionModel(inputList []AzureBlobDefinitionModel) (*f } if input.ContainerID != "" { - id, err := parse.StorageContainerDataPlaneID(input.ContainerID) + id, err := parse.StorageContainerDataPlaneID(input.ContainerID, storageDomainSuffix) if err != nil { return 
nil, err } @@ -969,13 +969,13 @@ func expandRepositoryRefDefinitionModel(referenceType string, referenceValue str return &output, nil } -func flattenArcAzureBlobDefinitionModel(input *fluxconfiguration.AzureBlobDefinition, azureBlob []AzureBlobDefinitionModel) ([]AzureBlobDefinitionModel, error) { +func flattenArcAzureBlobDefinitionModel(input *fluxconfiguration.AzureBlobDefinition, azureBlob []AzureBlobDefinitionModel, storageDomainSuffix string) ([]AzureBlobDefinitionModel, error) { outputList := make([]AzureBlobDefinitionModel, 0) if input == nil { return outputList, nil } - id, err := parse.StorageContainerDataPlaneID(fmt.Sprintf("%s/%s", pointer.From(input.Url), pointer.From(input.ContainerName))) + id, err := parse.StorageContainerDataPlaneID(fmt.Sprintf("%s/%s", pointer.From(input.Url), pointer.From(input.ContainerName)), storageDomainSuffix) if err != nil { return nil, err } diff --git a/internal/services/containers/kubernetes_flux_configuration_resource.go b/internal/services/containers/kubernetes_flux_configuration_resource.go index d5b25970cdf89..2fb11f833b1f3 100644 --- a/internal/services/containers/kubernetes_flux_configuration_resource.go +++ b/internal/services/containers/kubernetes_flux_configuration_resource.go @@ -244,7 +244,7 @@ func (r KubernetesFluxConfigurationResource) Arguments() map[string]*pluginsdk.S "container_id": { Type: pluginsdk.TypeString, Required: true, - ValidateFunc: storageValidate.StorageContainerDataPlaneID, + ValidateFunc: storageValidate.StorageContainerDataPlaneIDForDomainSuffix(""), // TODO need to know the storage domain suffix at schema time! }, "account_key": { @@ -589,7 +589,7 @@ func (r KubernetesFluxConfigurationResource) Create() sdk.ResourceFunc { properties.Properties.Bucket, properties.Properties.ConfigurationProtectedSettings = expandBucketDefinitionModel(model.Bucket) } else if _, exists = metadata.ResourceData.GetOk("blob_storage"); exists { properties.Properties.SourceKind = pointer.To(fluxconfiguration.SourceKindTypeAzureBlob) - azureBlob, err := expandAzureBlobDefinitionModel(model.BlobStorage) + azureBlob, err := expandAzureBlobDefinitionModel(model.BlobStorage, metadata.Client.Storage.StorageDomainSuffix) if err != nil { return fmt.Errorf("expanding `blob_storage`: %+v", err) } @@ -643,7 +643,7 @@ func (r KubernetesFluxConfigurationResource) Update() sdk.ResourceFunc { properties.Properties.ConfigurationProtectedSettings = nil if metadata.ResourceData.HasChange("blob_storage") { - azureBlob, err := expandAzureBlobDefinitionModel(model.BlobStorage) + azureBlob, err := expandAzureBlobDefinitionModel(model.BlobStorage, metadata.Client.Storage.StorageDomainSuffix) if err != nil { return fmt.Errorf("expanding `blob_storage`: %+v", err) } @@ -735,7 +735,7 @@ func (r KubernetesFluxConfigurationResource) Read() sdk.ResourceFunc { if model := resp.Model; model != nil { if properties := model.Properties; properties != nil { - blobStorage, err := flattenAzureBlobDefinitionModel(properties.AzureBlob, configModel.BlobStorage) + blobStorage, err := flattenAzureBlobDefinitionModel(properties.AzureBlob, configModel.BlobStorage, metadata.Client.Storage.StorageDomainSuffix) if err != nil { return fmt.Errorf("flattening `blob_storage`: %+v", err) } @@ -780,7 +780,7 @@ func (r KubernetesFluxConfigurationResource) Delete() sdk.ResourceFunc { } } -func expandAzureBlobDefinitionModel(inputList []AzureBlobDefinitionModel) (*fluxconfiguration.AzureBlobDefinition, error) { +func expandAzureBlobDefinitionModel(inputList []AzureBlobDefinitionModel, 
storageDomainSuffix string) (*fluxconfiguration.AzureBlobDefinition, error) { if len(inputList) == 0 { return nil, nil } @@ -797,7 +797,7 @@ func expandAzureBlobDefinitionModel(inputList []AzureBlobDefinitionModel) (*flux } if input.ContainerID != "" { - id, err := parse.StorageContainerDataPlaneID(input.ContainerID) + id, err := parse.StorageContainerDataPlaneID(input.ContainerID, storageDomainSuffix) if err != nil { return nil, err } @@ -1002,13 +1002,13 @@ func expandRepositoryRefDefinitionModel(referenceType string, referenceValue str return &output, nil } -func flattenAzureBlobDefinitionModel(input *fluxconfiguration.AzureBlobDefinition, azureBlob []AzureBlobDefinitionModel) ([]AzureBlobDefinitionModel, error) { +func flattenAzureBlobDefinitionModel(input *fluxconfiguration.AzureBlobDefinition, azureBlob []AzureBlobDefinitionModel, storageDomainSuffix string) ([]AzureBlobDefinitionModel, error) { outputList := make([]AzureBlobDefinitionModel, 0) if input == nil { return outputList, nil } - id, err := parse.StorageContainerDataPlaneID(fmt.Sprintf("%s/%s", pointer.From(input.Url), pointer.From(input.ContainerName))) + id, err := parse.StorageContainerDataPlaneID(fmt.Sprintf("%s/%s", pointer.From(input.Url), pointer.From(input.ContainerName)), storageDomainSuffix) if err != nil { return nil, err } diff --git a/internal/services/legacy/virtual_machine_resource.go b/internal/services/legacy/virtual_machine_resource.go index e1d7d72e3e377..a1fb4be0c7522 100644 --- a/internal/services/legacy/virtual_machine_resource.go +++ b/internal/services/legacy/virtual_machine_resource.go @@ -30,7 +30,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" "github.com/tombuildsstuff/kermit/sdk/compute/2023-03-01/compute" "github.com/tombuildsstuff/kermit/sdk/network/2022-07-01/network" ) @@ -1033,24 +1033,24 @@ func resourceVirtualMachineDeleteVhd(ctx context.Context, storageClient *intStor } uri := *vhd.URI - id, err := blobs.ParseResourceID(uri) + id, err := blobs.ParseBlobID(uri, storageClient.StorageDomainSuffix) if err != nil { return fmt.Errorf("parsing %q: %s", uri, err) } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", id.AccountName, id.BlobName, id.ContainerName, err) + return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", id.AccountId.AccountName, id.BlobName, id.ContainerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q (Disk %q)!", id.AccountName, uri) + return fmt.Errorf("Unable to locate Storage Account %q (Disk %q)!", id.AccountId.AccountName, uri) } if err != nil { return fmt.Errorf("building Blobs Client: %s", err) } - blobsClient, err := storageClient.BlobsClient(ctx, *account) + blobsClient, err := storageClient.BlobsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Blobs Client: %s", err) } @@ -1058,8 +1058,8 @@ func resourceVirtualMachineDeleteVhd(ctx context.Context, storageClient *intStor input := blobs.DeleteInput{ DeleteSnapshots: false, } - if _, err := 
blobsClient.Delete(ctx, id.AccountName, id.ContainerName, id.BlobName, input); err != nil { - return fmt.Errorf("deleting Blob %q (Container %q / Account %q / Resource Group %q): %s", id.BlobName, id.ContainerName, id.AccountName, account.ResourceGroup, err) + if _, err := blobsClient.Delete(ctx, id.ContainerName, id.BlobName, input); err != nil { + return fmt.Errorf("deleting Blob %q (Container %q / Account %q / Resource Group %q): %s", id.BlobName, id.ContainerName, id.AccountId.AccountName, account.ResourceGroup, err) } return nil diff --git a/internal/services/legacy/virtual_machine_resource_test.go b/internal/services/legacy/virtual_machine_resource_test.go index 656633a55ee08..edf31b377ed5c 100644 --- a/internal/services/legacy/virtual_machine_resource_test.go +++ b/internal/services/legacy/virtual_machine_resource_test.go @@ -8,13 +8,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" ) type VirtualMachineResource struct{} @@ -249,15 +250,15 @@ func (VirtualMachineResource) unmanagedDiskExistsInContainer(blobName string, sh return fmt.Errorf("Unable to locate Storage Account %q!", accountName) } - client, err := clients.Storage.BlobsClient(ctx, *account) + client, err := clients.Storage.BlobsDataPlaneClient(ctx, *account, clients.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Blobs Client: %s", err) } input := blobs.GetPropertiesInput{} - props, err := client.GetProperties(ctx, accountName, containerName, blobName, input) + props, err := client.GetProperties(ctx, containerName, blobName, input) if err != nil { - if utils.ResponseWasNotFound(props.Response) { + if response.WasNotFound(props.HttpResponse) { if !shouldExist { return nil } diff --git a/internal/services/storage/blobs.go b/internal/services/storage/blobs.go index 116c69ad9445e..70e59082642ba 100644 --- a/internal/services/storage/blobs.go +++ b/internal/services/storage/blobs.go @@ -14,14 +14,11 @@ import ( "runtime" "strings" "sync" - "time" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" ) -const pollingInterval = time.Second * 15 - type BlobUpload struct { Client *blobs.Client @@ -96,7 +93,7 @@ func (sbu BlobUpload) copy(ctx context.Context) error { CopySource: sbu.SourceUri, MetaData: sbu.MetaData, } - if err := sbu.Client.CopyAndWait(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, input, pollingInterval); err != nil { + if err := sbu.Client.CopyAndWait(ctx, sbu.ContainerName, sbu.BlobName, input); err != nil { return fmt.Errorf("copy/waiting: %s", err) } @@ -108,7 +105,7 @@ func (sbu BlobUpload) createEmptyAppendBlob(ctx context.Context) error { ContentType: utils.String(sbu.ContentType), MetaData: sbu.MetaData, } - if _, err := sbu.Client.PutAppendBlob(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, input); err != nil { + if _, err := 
sbu.Client.PutAppendBlob(ctx, sbu.ContainerName, sbu.BlobName, input); err != nil { return fmt.Errorf("PutAppendBlob: %s", err) } @@ -124,7 +121,7 @@ func (sbu BlobUpload) createEmptyBlockBlob(ctx context.Context) error { ContentType: utils.String(sbu.ContentType), MetaData: sbu.MetaData, } - if _, err := sbu.Client.PutBlockBlob(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, input); err != nil { + if _, err := sbu.Client.PutBlockBlob(ctx, sbu.ContainerName, sbu.BlobName, input); err != nil { return fmt.Errorf("PutBlockBlob: %s", err) } @@ -161,7 +158,7 @@ func (sbu BlobUpload) uploadBlockBlob(ctx context.Context) error { if sbu.ContentMD5 != "" { input.ContentMD5 = utils.String(sbu.ContentMD5) } - if err := sbu.Client.PutBlockBlobFromFile(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, file, input); err != nil { + if err := sbu.Client.PutBlockBlobFromFile(ctx, sbu.ContainerName, sbu.BlobName, file, input); err != nil { return fmt.Errorf("PutBlockBlobFromFile: %s", err) } @@ -178,7 +175,7 @@ func (sbu BlobUpload) createEmptyPageBlob(ctx context.Context) error { ContentType: utils.String(sbu.ContentType), MetaData: sbu.MetaData, } - if _, err := sbu.Client.PutPageBlob(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, input); err != nil { + if _, err := sbu.Client.PutPageBlob(ctx, sbu.ContainerName, sbu.BlobName, input); err != nil { return fmt.Errorf("PutPageBlob: %s", err) } @@ -228,7 +225,7 @@ func (sbu BlobUpload) uploadPageBlob(ctx context.Context) error { ContentType: utils.String(sbu.ContentType), MetaData: sbu.MetaData, } - if _, err := sbu.Client.PutPageBlob(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, input); err != nil { + if _, err := sbu.Client.PutPageBlob(ctx, sbu.ContainerName, sbu.BlobName, input); err != nil { return fmt.Errorf("PutPageBlob: %s", err) } @@ -373,7 +370,7 @@ func (sbu BlobUpload) blobPageUploadWorker(ctx context.Context, uploadCtx blobPa Content: chunk, } - if _, err := sbu.Client.PutPageUpdate(ctx, sbu.AccountName, sbu.ContainerName, sbu.BlobName, input); err != nil { + if _, err := sbu.Client.PutPageUpdate(ctx, sbu.ContainerName, sbu.BlobName, input); err != nil { uploadCtx.errors <- fmt.Errorf("writing page at offset %d for file %q: %s", page.offset, sbu.Source, err) uploadCtx.wg.Done() continue diff --git a/internal/services/storage/client/client.go b/internal/services/storage/client/client.go index 199f0dbbfc56c..bcd5750ee6907 100644 --- a/internal/services/storage/client/client.go +++ b/internal/services/storage/client/client.go @@ -4,61 +4,63 @@ package client import ( - "context" "fmt" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" // nolint: staticcheck - "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" storage_v2023_01_01 "github.com/hashicorp/go-azure-sdk/resource-manager/storage/2023-01-01" "github.com/hashicorp/go-azure-sdk/resource-manager/storagesync/2020-03-01/cloudendpointresource" "github.com/hashicorp/go-azure-sdk/resource-manager/storagesync/2020-03-01/storagesyncservicesresource" "github.com/hashicorp/go-azure-sdk/resource-manager/storagesync/2020-03-01/syncgroupresource" + "github.com/hashicorp/go-azure-sdk/sdk/auth" "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" "github.com/hashicorp/terraform-provider-azurerm/internal/common" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/shim" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/accounts" - 
"github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/filesystems" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/paths" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/directories" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/files" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/entities" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/filesystems" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/paths" ) type Client struct { - AccountsClient *storage.AccountsClient - FileSystemsClient *filesystems.Client + SubscriptionId string + ADLSGen2PathsClient *paths.Client - BlobServicesClient *storage.BlobServicesClient + AccountsClient *storage.AccountsClient BlobInventoryPoliciesClient *storage.BlobInventoryPoliciesClient + BlobServicesClient *storage.BlobServicesClient EncryptionScopesClient *storage.EncryptionScopesClient - Environment azure.Environment FileServicesClient *storage.FileServicesClient + FileSystemsClient *filesystems.Client SyncCloudEndpointsClient *cloudendpointresource.CloudEndpointResourceClient - SyncServiceClient *storagesyncservicesresource.StorageSyncServicesResourceClient SyncGroupsClient *syncgroupresource.SyncGroupResourceClient - SubscriptionId string + SyncServiceClient *storagesyncservicesresource.StorageSyncServicesResourceClient ResourceManager *storage_v2023_01_01.Client - resourceManagerAuthorizer autorest.Authorizer - storageAdAuth *autorest.Authorizer + AzureEnvironment azure.Environment + StorageDomainSuffix string + + authorizerForAad auth.Authorizer } func NewClient(o *common.ClientOptions) (*Client, error) { + storageSuffix, ok := o.Environment.Storage.DomainSuffix() + if !ok { + return nil, fmt.Errorf("determining domain suffix for storage in environment: %s", o.Environment.Name) + } + accountsClient := storage.NewAccountsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&accountsClient.Client, o.ResourceManagerAuthorizer) - fileSystemsClient := filesystems.NewWithEnvironment(o.AzureEnvironment) - o.ConfigureClient(&fileSystemsClient.Client, o.StorageAuthorizer) + fileSystemsClient, err := filesystems.NewWithBaseUri(*storageSuffix) + if err != nil { + return nil, fmt.Errorf("building Data Lake Store Filesystems client: %+v", err) + } + o.Configure(fileSystemsClient.Client, o.Authorizers.Storage) - adlsGen2PathsClient := paths.NewWithEnvironment(o.AzureEnvironment) - o.ConfigureClient(&adlsGen2PathsClient.Client, o.StorageAuthorizer) + adlsGen2PathsClient, err := paths.NewWithBaseUri(*storageSuffix) + if err != nil { + return nil, fmt.Errorf("building Data Lake Storage Path client: %+v", err) + } + o.Configure(adlsGen2PathsClient.Client, o.Authorizers.Storage) blobServicesClient := storage.NewBlobServicesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&blobServicesClient.Client, o.ResourceManagerAuthorizer) @@ -100,12 +102,11 @@ func NewClient(o *common.ClientOptions) (*Client, error) { // (which should fix #2977) when the storage clients have been moved in here client := Client{ AccountsClient: &accountsClient, - 
FileSystemsClient: &fileSystemsClient, - ADLSGen2PathsClient: &adlsGen2PathsClient, + FileSystemsClient: fileSystemsClient, + ADLSGen2PathsClient: adlsGen2PathsClient, BlobServicesClient: &blobServicesClient, BlobInventoryPoliciesClient: &blobInventoryPoliciesClient, EncryptionScopesClient: &encryptionScopesClient, - Environment: o.AzureEnvironment, FileServicesClient: &fileServicesClient, ResourceManager: resourceManager, SubscriptionId: o.SubscriptionId, @@ -113,195 +114,13 @@ func NewClient(o *common.ClientOptions) (*Client, error) { SyncServiceClient: syncServiceClient, SyncGroupsClient: syncGroupsClient, - resourceManagerAuthorizer: o.ResourceManagerAuthorizer, + AzureEnvironment: o.AzureEnvironment, + StorageDomainSuffix: *storageSuffix, } if o.StorageUseAzureAD { - client.storageAdAuth = &o.StorageAuthorizer + client.authorizerForAad = o.Authorizers.Storage } return &client, nil } - -func (client Client) AccountsDataPlaneClient(ctx context.Context, account accountDetails) (*accounts.Client, error) { - if client.storageAdAuth != nil { - accountsClient := accounts.NewWithEnvironment(client.Environment) - accountsClient.Client.Authorizer = *client.storageAdAuth - return &accountsClient, nil - } - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKey) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - accountsClient := accounts.NewWithEnvironment(client.Environment) - accountsClient.Client.Authorizer = storageAuth - return &accountsClient, nil -} - -func (client Client) BlobsClient(ctx context.Context, account accountDetails) (*blobs.Client, error) { - if client.storageAdAuth != nil { - blobsClient := blobs.NewWithEnvironment(client.Environment) - blobsClient.Client.Authorizer = *client.storageAdAuth - return &blobsClient, nil - } - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKey) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - blobsClient := blobs.NewWithEnvironment(client.Environment) - blobsClient.Client.Authorizer = storageAuth - return &blobsClient, nil -} - -func (client Client) ContainersClient(ctx context.Context, account accountDetails) (shim.StorageContainerWrapper, error) { - if client.storageAdAuth != nil { - containersClient := containers.NewWithEnvironment(client.Environment) - containersClient.Client.Authorizer = *client.storageAdAuth - shim := shim.NewDataPlaneStorageContainerWrapper(&containersClient) - return shim, nil - } - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKey) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - containersClient := containers.NewWithEnvironment(client.Environment) - containersClient.Client.Authorizer = storageAuth - - shim := shim.NewDataPlaneStorageContainerWrapper(&containersClient) - return shim, nil -} - -func (client Client) FileShareDirectoriesClient(ctx context.Context, account accountDetails) (*directories.Client, error) { - // NOTE: Files do not support AzureAD Authentication - - 
accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLite) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - directoriesClient := directories.NewWithEnvironment(client.Environment) - directoriesClient.Client.Authorizer = storageAuth - return &directoriesClient, nil -} - -func (client Client) FileShareFilesClient(ctx context.Context, account accountDetails) (*files.Client, error) { - // NOTE: Files do not support AzureAD Authentication - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLite) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - filesClient := files.NewWithEnvironment(client.Environment) - filesClient.Client.Authorizer = storageAuth - return &filesClient, nil -} - -func (client Client) FileSharesClient(ctx context.Context, account accountDetails) (shim.StorageShareWrapper, error) { - // NOTE: Files do not support AzureAD Authentication - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLite) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - sharesClient := shares.NewWithEnvironment(client.Environment) - sharesClient.Client.Authorizer = storageAuth - shim := shim.NewDataPlaneStorageShareWrapper(&sharesClient) - return shim, nil -} - -func (client Client) QueuesClient(ctx context.Context, account accountDetails) (shim.StorageQueuesWrapper, error) { - if client.storageAdAuth != nil { - queueClient := queues.NewWithEnvironment(client.Environment) - queueClient.Client.Authorizer = *client.storageAdAuth - return shim.NewDataPlaneStorageQueueWrapper(&queueClient), nil - } - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLite) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - queuesClient := queues.NewWithEnvironment(client.Environment) - queuesClient.Client.Authorizer = storageAuth - return shim.NewDataPlaneStorageQueueWrapper(&queuesClient), nil -} - -func (client Client) TableEntityClient(ctx context.Context, account accountDetails) (*entities.Client, error) { - // NOTE: Table Entity does not support AzureAD Authentication - - accountKey, err := account.AccountKey(ctx, client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLiteForTable) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - entitiesClient := entities.NewWithEnvironment(client.Environment) - entitiesClient.Client.Authorizer = storageAuth - return &entitiesClient, nil -} - -func (client Client) TablesClient(ctx context.Context, account accountDetails) (shim.StorageTableWrapper, error) { - // NOTE: Tables do not support AzureAD Authentication - - accountKey, err := account.AccountKey(ctx, 
client) - if err != nil { - return nil, fmt.Errorf("retrieving Account Key: %s", err) - } - - storageAuth, err := autorest.NewSharedKeyAuthorizer(account.name, *accountKey, autorest.SharedKeyLiteForTable) - if err != nil { - return nil, fmt.Errorf("building Authorizer: %+v", err) - } - - tablesClient := tables.NewWithEnvironment(client.Environment) - tablesClient.Client.Authorizer = storageAuth - shim := shim.NewDataPlaneStorageTableWrapper(&tablesClient) - return shim, nil -} diff --git a/internal/services/storage/client/data_plane.go b/internal/services/storage/client/data_plane.go new file mode 100644 index 0000000000000..740d778b16583 --- /dev/null +++ b/internal/services/storage/client/data_plane.go @@ -0,0 +1,299 @@ +package client + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-azure-sdk/sdk/auth" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/shim" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/directories" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/files" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/queue/queues" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/entities" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/tables" +) + +type DataPlaneOperation struct { + SupportsAadAuthentication bool + SupportsSharedKeyAuthentication bool +} + +type EndpointType string + +const ( + EndpointTypeBlob = "blob" + EndpointTypeFile = "file" + EndpointTypeQueue = "queue" + EndpointTypeTable = "table" +) + +func dataPlaneEndpoint(account accountDetails, endpointType EndpointType) (*string, error) { + if account.Properties == nil { + return nil, fmt.Errorf("storage account %q has no properties", account.name) + } + if account.Properties.PrimaryEndpoints == nil { + return nil, fmt.Errorf("storage account %q has missing endpoints", account.name) + } + + var baseUri string + + switch endpointType { + case EndpointTypeBlob: + if account.Properties.PrimaryEndpoints.Blob != nil { + baseUri = strings.TrimSuffix(*account.Properties.PrimaryEndpoints.Blob, "/") + } + case EndpointTypeFile: + if account.Properties.PrimaryEndpoints.File != nil { + baseUri = strings.TrimSuffix(*account.Properties.PrimaryEndpoints.File, "/") + } + case EndpointTypeQueue: + if account.Properties.PrimaryEndpoints.Queue != nil { + baseUri = strings.TrimSuffix(*account.Properties.PrimaryEndpoints.Queue, "/") + } + case EndpointTypeTable: + if account.Properties.PrimaryEndpoints.Table != nil { + baseUri = strings.TrimSuffix(*account.Properties.PrimaryEndpoints.Table, "/") + } + default: + return nil, fmt.Errorf("internal-error: unrecognised endpoint type %q when building storage client", endpointType) + } + + if baseUri == "" { + return nil, fmt.Errorf("determining storage account %s endpoint for : %q", endpointType, account.name) + } + + return &baseUri, nil +} + +func (Client) DataPlaneOperationSupportingAnyAuthMethod() DataPlaneOperation { + return DataPlaneOperation{ + SupportsAadAuthentication: true, + SupportsSharedKeyAuthentication: true, + } +} + +func (Client) DataPlaneOperationSupportingOnlySharedKeyAuth() DataPlaneOperation { + return DataPlaneOperation{ + 
SupportsAadAuthentication: false, + SupportsSharedKeyAuthentication: true, + } +} + +func (client Client) ConfigureDataPlane(ctx context.Context, baseUri, clientName string, baseClient client.BaseClient, account accountDetails, operation DataPlaneOperation) error { + if operation.SupportsAadAuthentication && client.authorizerForAad != nil { + baseClient.SetAuthorizer(client.authorizerForAad) + return nil + } + + if operation.SupportsSharedKeyAuthentication { + accountKey, err := account.AccountKey(ctx, client) + if err != nil { + return fmt.Errorf("retrieving Storage Account Key: %s", err) + } + + storageAuth, err := auth.NewSharedKeyAuthorizer(account.name, *accountKey, auth.SharedKey) + if err != nil { + return fmt.Errorf("building Shared Key Authorizer for %s client: %+v", clientName, err) + } + + baseClient.SetAuthorizer(storageAuth) + return nil + } + + return fmt.Errorf("building %s client: no configured authentication types are supported", clientName) +} + +func (client Client) AccountsDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (*accounts.Client, error) { + const clientName = "Blob Storage Accounts" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeBlob) + if err != nil { + return nil, err + } + + apiClient, err := accounts.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return apiClient, nil +} + +func (client Client) BlobsDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (*blobs.Client, error) { + const clientName = "Blob Storage Blobs" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeBlob) + if err != nil { + return nil, err + } + + apiClient, err := blobs.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return apiClient, nil +} + +func (client Client) ContainersDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (shim.StorageContainerWrapper, error) { + const clientName = "Blob Storage Containers" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeBlob) + if err != nil { + return nil, err + } + + apiClient, err := containers.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return shim.NewDataPlaneStorageContainerWrapper(apiClient), nil +} + +func (client Client) FileShareDirectoriesDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (*directories.Client, error) { + const clientName = "File Storage Shares" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeFile) + if err != nil { + return nil, err + } + + apiClient, err := directories.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return apiClient, nil +} + +func (client Client) 
FileShareFilesDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (*files.Client, error) { + const clientName = "File Storage Share Files" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeFile) + if err != nil { + return nil, err + } + + apiClient, err := files.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return apiClient, nil +} + +func (client Client) FileSharesDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (shim.StorageShareWrapper, error) { + const clientName = "File Storage Share Shares" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeFile) + if err != nil { + return nil, err + } + + apiClient, err := shares.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return shim.NewDataPlaneStorageShareWrapper(apiClient), nil +} + +func (client Client) QueuesDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (shim.StorageQueuesWrapper, error) { + const clientName = "File Storage Queue Queues" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeQueue) + if err != nil { + return nil, err + } + + apiClient, err := queues.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return shim.NewDataPlaneStorageQueueWrapper(apiClient), nil +} + +func (client Client) TableEntityDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (*entities.Client, error) { + const clientName = "Table Storage Share Entities" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeTable) + if err != nil { + return nil, err + } + + apiClient, err := entities.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return apiClient, nil +} + +func (client Client) TablesDataPlaneClient(ctx context.Context, account accountDetails, operation DataPlaneOperation) (shim.StorageTableWrapper, error) { + const clientName = "Table Storage Share Tables" + + baseUri, err := dataPlaneEndpoint(account, EndpointTypeFile) + if err != nil { + return nil, err + } + + apiClient, err := tables.NewWithBaseUri(*baseUri) + if err != nil { + return nil, fmt.Errorf("building %s client: %+v", clientName, err) + } + + err = client.ConfigureDataPlane(ctx, *baseUri, clientName, apiClient.Client, account, operation) + if err != nil { + return nil, err + } + + return shim.NewDataPlaneStorageTableWrapper(apiClient), nil +} diff --git a/internal/services/storage/helpers/schema.go b/internal/services/storage/helpers/schema.go new file mode 100644 index 0000000000000..8d6cf8725a626 --- /dev/null +++ b/internal/services/storage/helpers/schema.go @@ -0,0 +1,27 @@ +package helpers + +import ( + "context" + "log" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type StorageIDValidationFunc func(id, storageDomainSuffix string) error + +func ImporterValidatingStorageResourceId(validateFunc StorageIDValidationFunc) *schema.ResourceImporter { + return &schema.ResourceImporter{ + StateContext: func(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { + storageDomainSuffix := meta.(*clients.Client).Storage.StorageDomainSuffix + log.Printf("[DEBUG] Importing Storage Resource - parsing %q using domain suffix %q", d.Id(), storageDomainSuffix) + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.Timeout(schema.TimeoutRead)) + defer cancel() + } + return []*pluginsdk.ResourceData{d}, validateFunc(d.Id(), storageDomainSuffix) + }, + } +} diff --git a/internal/services/storage/migration/share.go b/internal/services/storage/migration/share.go index 6e48a71d3ae9a..75abecf1a189a 100644 --- a/internal/services/storage/migration/share.go +++ b/internal/services/storage/migration/share.go @@ -12,7 +12,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" ) var _ pluginsdk.StateUpgrade = ShareV0ToV1{} @@ -54,16 +55,18 @@ func (s ShareV1ToV2) UpgradeFunc() pluginsdk.StateUpgraderFunc { // name/resourceGroup/accountName parsedId := strings.Split(id, "/") if len(parsedId) != 3 { - return rawState, fmt.Errorf("Expected 3 segments in the ID but got %d", len(parsedId)) + return rawState, fmt.Errorf("expected 3 segments in the ID but got %d", len(parsedId)) } shareName := parsedId[0] accountName := parsedId[2] - environment := meta.(*clients.Client).Account.AzureEnvironment - client := shares.NewWithEnvironment(environment) + accountId, err := accounts.ParseAccountID(accountName, meta.(*clients.Client).Storage.StorageDomainSuffix) + if err != nil { + return rawState, fmt.Errorf("parsing Account ID: %v", err) + } - newResourceId := client.GetResourceID(accountName, shareName) + newResourceId := shares.NewShareID(*accountId, shareName) log.Printf("[DEBUG] Updating Resource ID from %q to %q", id, newResourceId) rawState["id"] = newResourceId @@ -72,7 +75,7 @@ func (s ShareV1ToV2) UpgradeFunc() pluginsdk.StateUpgraderFunc { } } -// the schema schema was used for both V0 and V1 +// this schema was used for both V0 and V1 func shareSchemaForV0AndV1() map[string]*pluginsdk.Schema { return map[string]*pluginsdk.Schema{ "name": { diff --git a/internal/services/storage/parse/storage_container_data_plane.go b/internal/services/storage/parse/storage_container_data_plane.go index 47dc25a7ec8ca..9dce550a5b760 100644 --- a/internal/services/storage/parse/storage_container_data_plane.go +++ b/internal/services/storage/parse/storage_container_data_plane.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + 
"github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) var _ resourceids.Id = StorageContainerDataPlaneId{} @@ -41,8 +41,8 @@ func NewStorageContainerDataPlaneId(accountName, domainSuffix, name string) Stor } } -func StorageContainerDataPlaneID(id string) (*StorageContainerDataPlaneId, error) { - parsed, err := containers.ParseResourceID(id) +func StorageContainerDataPlaneID(id, domainSuffix string) (*StorageContainerDataPlaneId, error) { + parsed, err := containers.ParseContainerID(id, domainSuffix) if err != nil { return nil, err } @@ -60,7 +60,7 @@ func StorageContainerDataPlaneID(id string) (*StorageContainerDataPlaneId, error domainNameSuffix := strings.TrimPrefix(host, fmt.Sprintf("%s.blob.", hostSegments[0])) return &StorageContainerDataPlaneId{ - AccountName: parsed.AccountName, + AccountName: parsed.AccountId.AccountName, DomainSuffix: domainNameSuffix, Name: parsed.ContainerName, }, nil diff --git a/internal/services/storage/parse/storage_queue_data_plane.go b/internal/services/storage/parse/storage_queue_data_plane.go index 3fd6770ec093d..fc09bf22d4d79 100644 --- a/internal/services/storage/parse/storage_queue_data_plane.go +++ b/internal/services/storage/parse/storage_queue_data_plane.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/queue/queues" ) var _ resourceids.Id = StorageQueueDataPlaneId{} @@ -41,8 +41,8 @@ func NewStorageQueueDataPlaneId(accountName, domainSuffix, name string) StorageQ } } -func StorageQueueDataPlaneID(id string) (*StorageQueueDataPlaneId, error) { - parsed, err := queues.ParseResourceID(id) +func StorageQueueDataPlaneID(id, domainSuffix string) (*StorageQueueDataPlaneId, error) { + parsed, err := queues.ParseQueueID(id, domainSuffix) if err != nil { return nil, err } @@ -60,7 +60,7 @@ func StorageQueueDataPlaneID(id string) (*StorageQueueDataPlaneId, error) { domainNameSuffix := strings.TrimPrefix(host, fmt.Sprintf("%s.queue.", hostSegments[0])) return &StorageQueueDataPlaneId{ - AccountName: parsed.AccountName, + AccountName: parsed.AccountId.AccountName, DomainSuffix: domainNameSuffix, Name: parsed.QueueName, }, nil diff --git a/internal/services/storage/parse/storage_share_data_plane.go b/internal/services/storage/parse/storage_share_data_plane.go index 321fff4a7a8fe..fa4335ebc6a9d 100644 --- a/internal/services/storage/parse/storage_share_data_plane.go +++ b/internal/services/storage/parse/storage_share_data_plane.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" ) // TODO: tests for this @@ -42,8 +42,8 @@ func NewStorageShareDataPlaneId(accountName, domainSuffix, name string) StorageS } } -func StorageShareDataPlaneID(id string) (*StorageShareDataPlaneId, error) { - parsed, err := shares.ParseResourceID(id) +func StorageShareDataPlaneID(id, domainSuffix string) (*StorageShareDataPlaneId, error) { + parsed, err := shares.ParseShareID(id, domainSuffix) if err != nil { return nil, err } @@ -61,7 +61,7 @@ func StorageShareDataPlaneID(id string) (*StorageShareDataPlaneId, error) { domainNameSuffix := strings.TrimPrefix(host, fmt.Sprintf("%s.file.", hostSegments[0])) return &StorageShareDataPlaneId{ - AccountName: parsed.AccountName, + AccountName: 
parsed.AccountId.AccountName, DomainSuffix: domainNameSuffix, Name: parsed.ShareName, }, nil diff --git a/internal/services/storage/parse/storage_table_data_plane.go b/internal/services/storage/parse/storage_table_data_plane.go index bf668f89f317e..789801810ab79 100644 --- a/internal/services/storage/parse/storage_table_data_plane.go +++ b/internal/services/storage/parse/storage_table_data_plane.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/tables" ) // TODO: tests for this @@ -42,8 +42,8 @@ func NewStorageTableDataPlaneId(accountName, domainSuffix, name string) StorageT } } -func StorageTableDataPlaneID(input string) (*StorageTableDataPlaneId, error) { - parsed, err := tables.ParseResourceID(input) +func StorageTableDataPlaneID(input, domainSuffix string) (*StorageTableDataPlaneId, error) { + parsed, err := tables.ParseTableID(input, domainSuffix) if err != nil { return nil, err } @@ -61,7 +61,7 @@ func StorageTableDataPlaneID(input string) (*StorageTableDataPlaneId, error) { domainNameSuffix := strings.TrimPrefix(host, fmt.Sprintf("%s.table.", hostSegments[0])) return &StorageTableDataPlaneId{ - AccountName: parsed.AccountName, + AccountName: parsed.AccountId.AccountName, DomainSuffix: domainNameSuffix, Name: parsed.TableName, }, nil diff --git a/internal/services/storage/shim/containers.go b/internal/services/storage/shim/containers.go index 55156bb5d9f31..c29325ce4aa26 100644 --- a/internal/services/storage/shim/containers.go +++ b/internal/services/storage/shim/containers.go @@ -6,16 +6,16 @@ package shim import ( "context" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) type StorageContainerWrapper interface { - Create(ctx context.Context, resourceGroup, accountName, containerName string, input containers.CreateInput) error - Delete(ctx context.Context, resourceGroup, accountName, containerName string) error - Exists(ctx context.Context, resourceGroup, accountName, containerName string) (*bool, error) - Get(ctx context.Context, resourceGroup, accountName, containerName string) (*StorageContainerProperties, error) - UpdateAccessLevel(ctx context.Context, resourceGroup, accountName, containerName string, level containers.AccessLevel) error - UpdateMetaData(ctx context.Context, resourceGroup, accountName, containerName string, metadata map[string]string) error + Create(ctx context.Context, containerName string, input containers.CreateInput) error + Delete(ctx context.Context, containerName string) error + Exists(ctx context.Context, containerName string) (*bool, error) + Get(ctx context.Context, containerName string) (*StorageContainerProperties, error) + UpdateAccessLevel(ctx context.Context, containerName string, level containers.AccessLevel) error + UpdateMetaData(ctx context.Context, containerName string, metaData map[string]string) error } type StorageContainerProperties struct { diff --git a/internal/services/storage/shim/containers_data_plane.go b/internal/services/storage/shim/containers_data_plane.go index 2310188f5230c..e365ef9819fbb 100644 --- a/internal/services/storage/shim/containers_data_plane.go +++ b/internal/services/storage/shim/containers_data_plane.go @@ -6,12 +6,10 @@ package shim import ( "context" "fmt" - "strings" - "time" - 
"github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) type DataPlaneStorageContainerWrapper struct { @@ -24,59 +22,37 @@ func NewDataPlaneStorageContainerWrapper(client *containers.Client) StorageConta } } -func (w DataPlaneStorageContainerWrapper) Create(ctx context.Context, _, accountName, containerName string, input containers.CreateInput) error { - timeout, ok := ctx.Deadline() - if !ok { - return fmt.Errorf("context is missing a timeout") - } - - if resp, err := w.client.Create(ctx, accountName, containerName, input); err != nil { - // If we fail due to previous delete still in progress, then we can retry - if utils.ResponseWasConflict(resp.Response) && strings.Contains(err.Error(), "ContainerBeingDeleted") { - stateConf := &pluginsdk.StateChangeConf{ - Pending: []string{"waitingOnDelete"}, - Target: []string{"succeeded"}, - Refresh: w.createRefreshFunc(ctx, accountName, containerName, input), - PollInterval: 10 * time.Second, - NotFoundChecks: 180, - Timeout: time.Until(timeout), - } - - if _, err := stateConf.WaitForStateContext(ctx); err != nil { - return fmt.Errorf("failed creating container: %+v", err) - } - } else { - return fmt.Errorf("failed creating container: %+v", err) - } +func (w DataPlaneStorageContainerWrapper) Create(ctx context.Context, containerName string, input containers.CreateInput) error { + if _, err := w.client.Create(ctx, containerName, input); err != nil { + return fmt.Errorf("creating container: %+v", err) } return nil } -func (w DataPlaneStorageContainerWrapper) Delete(ctx context.Context, _, accountName, containerName string) error { - resp, err := w.client.Delete(ctx, accountName, containerName) - if utils.ResponseWasNotFound(resp) { +func (w DataPlaneStorageContainerWrapper) Delete(ctx context.Context, containerName string) error { + resp, err := w.client.Delete(ctx, containerName) + if response.WasNotFound(resp.HttpResponse) { return nil } return err } -func (w DataPlaneStorageContainerWrapper) Exists(ctx context.Context, _, accountName, containerName string) (*bool, error) { - existing, err := w.client.GetProperties(ctx, accountName, containerName) +func (w DataPlaneStorageContainerWrapper) Exists(ctx context.Context, containerName string) (*bool, error) { + existing, err := w.client.GetProperties(ctx, containerName, containers.GetPropertiesInput{}) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return nil, err + if response.WasNotFound(existing.HttpResponse) { + return pointer.To(false), nil } + return nil, err } - - exists := !utils.ResponseWasNotFound(existing.Response) - return &exists, nil + return pointer.To(true), nil } -func (w DataPlaneStorageContainerWrapper) Get(ctx context.Context, _, accountName, containerName string) (*StorageContainerProperties, error) { - props, err := w.client.GetProperties(ctx, accountName, containerName) +func (w DataPlaneStorageContainerWrapper) Get(ctx context.Context, containerName string) (*StorageContainerProperties, error) { + props, err := w.client.GetProperties(ctx, containerName, containers.GetPropertiesInput{}) if err != nil { - if utils.ResponseWasNotFound(props.Response) { + if response.WasNotFound(props.HttpResponse) { return nil, nil } @@ -91,29 
+67,18 @@ func (w DataPlaneStorageContainerWrapper) Get(ctx context.Context, _, accountNam }, nil } -func (w DataPlaneStorageContainerWrapper) UpdateAccessLevel(ctx context.Context, _, accountName, containerName string, level containers.AccessLevel) error { - _, err := w.client.SetAccessControl(ctx, accountName, containerName, level) - return err -} - -func (w DataPlaneStorageContainerWrapper) UpdateMetaData(ctx context.Context, _, accountName, containerName string, metaData map[string]string) error { - _, err := w.client.SetMetaData(ctx, accountName, containerName, metaData) +func (w DataPlaneStorageContainerWrapper) UpdateAccessLevel(ctx context.Context, containerName string, level containers.AccessLevel) error { + input := containers.SetAccessControlInput{ + AccessLevel: level, + } + _, err := w.client.SetAccessControl(ctx, containerName, input) return err } -func (w DataPlaneStorageContainerWrapper) createRefreshFunc(ctx context.Context, accountName string, containerName string, input containers.CreateInput) pluginsdk.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := w.client.Create(ctx, accountName, containerName, input) - if err != nil { - if !utils.ResponseWasConflict(resp.Response) { - return nil, "", err - } - - if utils.ResponseWasConflict(resp.Response) && strings.Contains(err.Error(), "ContainerBeingDeleted") { - return nil, "waitingOnDelete", nil - } - } - - return "succeeded", "succeeded", nil +func (w DataPlaneStorageContainerWrapper) UpdateMetaData(ctx context.Context, containerName string, metaData map[string]string) error { + input := containers.SetMetaDataInput{ + MetaData: metaData, } + _, err := w.client.SetMetaData(ctx, containerName, input) + return err } diff --git a/internal/services/storage/shim/queues.go b/internal/services/storage/shim/queues.go index d145093014b02..d6d1d4e432c92 100644 --- a/internal/services/storage/shim/queues.go +++ b/internal/services/storage/shim/queues.go @@ -6,17 +6,17 @@ package shim import ( "context" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/queue/queues" ) type StorageQueuesWrapper interface { - Create(ctx context.Context, resourceGroup, accountName, queueName string, metaData map[string]string) error - Delete(ctx context.Context, resourceGroup, accountName, queueName string) error - Exists(ctx context.Context, resourceGroup, accountName, queueName string) (*bool, error) - Get(ctx context.Context, resourceGroup, accountName, queueName string) (*StorageQueueProperties, error) - GetServiceProperties(ctx context.Context, resourceGroup, accountName string) (*queues.StorageServiceProperties, error) - UpdateMetaData(ctx context.Context, resourceGroup, accountName, queueName string, metaData map[string]string) error - UpdateServiceProperties(ctx context.Context, resourceGroup, accountName string, properties queues.StorageServiceProperties) error + Create(ctx context.Context, queueName string, metaData map[string]string) error + Delete(ctx context.Context, queueName string) error + Exists(ctx context.Context, queueName string) (*bool, error) + Get(ctx context.Context, queueName string) (*StorageQueueProperties, error) + GetServiceProperties(ctx context.Context) (*queues.StorageServiceProperties, error) + UpdateMetaData(ctx context.Context, queueName string, metaData map[string]string) error + UpdateServiceProperties(ctx context.Context, properties queues.StorageServiceProperties) error } type StorageQueueProperties struct { 
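Note on the shim refactor above: the 2023-11-03 giovanni data-plane clients appear to be scoped to a single storage account's endpoint at construction time, which is why every wrapper method drops the resourceGroup/accountName parameters and takes only the resource name. Below is a minimal caller sketch against the updated StorageQueuesWrapper interface, assuming it compiles inside the provider module (the shim package is internal); ensureQueue and its arguments are hypothetical and shown for illustration only, not part of this change set.

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/shim"
)

// ensureQueue creates the queue when it does not exist yet and otherwise reconciles
// its metadata. The wrapper is already bound to one storage account, so callers only
// pass the queue name.
func ensureQueue(ctx context.Context, queuesClient shim.StorageQueuesWrapper, queueName string, metaData map[string]string) error {
	exists, err := queuesClient.Exists(ctx, queueName)
	if err != nil {
		return fmt.Errorf("checking for existing queue %q: %v", queueName, err)
	}

	if exists == nil || !*exists {
		// The wrapper builds queues.CreateInput internally, per the data-plane implementation below.
		return queuesClient.Create(ctx, queueName, metaData)
	}

	return queuesClient.UpdateMetaData(ctx, queueName, metaData)
}

Keeping the account scoping in client construction, rather than on every call, also lets the provider choose per operation how that client authenticates, which appears to be what the DataPlaneOperationSupportingAnyAuthMethod and DataPlaneOperationSupportingOnlySharedKeyAuth arguments elsewhere in this diff are selecting.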
diff --git a/internal/services/storage/shim/queues_data_plane.go b/internal/services/storage/shim/queues_data_plane.go index 3052f205761af..8b52b1e9d0685 100644 --- a/internal/services/storage/shim/queues_data_plane.go +++ b/internal/services/storage/shim/queues_data_plane.go @@ -6,8 +6,9 @@ package shim import ( "context" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/queue/queues" ) type DataPlaneStorageQueueWrapper struct { @@ -20,32 +21,34 @@ func NewDataPlaneStorageQueueWrapper(client *queues.Client) StorageQueuesWrapper } } -func (w DataPlaneStorageQueueWrapper) Create(ctx context.Context, _, accountName, queueName string, metaData map[string]string) error { - _, err := w.client.Create(ctx, accountName, queueName, metaData) +func (w DataPlaneStorageQueueWrapper) Create(ctx context.Context, queueName string, metaData map[string]string) error { + input := queues.CreateInput{ + MetaData: metaData, + } + _, err := w.client.Create(ctx, queueName, input) return err } -func (w DataPlaneStorageQueueWrapper) Delete(ctx context.Context, _, accountName, queueName string) error { - _, err := w.client.Delete(ctx, accountName, queueName) +func (w DataPlaneStorageQueueWrapper) Delete(ctx context.Context, queueName string) error { + _, err := w.client.Delete(ctx, queueName) return err } -func (w DataPlaneStorageQueueWrapper) Exists(ctx context.Context, _, accountName, queueName string) (*bool, error) { - existing, err := w.client.GetMetaData(ctx, accountName, queueName) +func (w DataPlaneStorageQueueWrapper) Exists(ctx context.Context, queueName string) (*bool, error) { + existing, err := w.client.GetMetaData(ctx, queueName) if err != nil { - if utils.ResponseWasNotFound(existing.Response) { - return utils.Bool(false), nil + if response.WasNotFound(existing.HttpResponse) { + return pointer.To(false), nil } return nil, err } - - return utils.Bool(true), nil + return pointer.To(true), nil } -func (w DataPlaneStorageQueueWrapper) Get(ctx context.Context, _, accountName, queueName string) (*StorageQueueProperties, error) { - props, err := w.client.GetMetaData(ctx, accountName, queueName) +func (w DataPlaneStorageQueueWrapper) Get(ctx context.Context, queueName string) (*StorageQueueProperties, error) { + props, err := w.client.GetMetaData(ctx, queueName) if err != nil { - if utils.ResponseWasNotFound(props.Response) { + if response.WasNotFound(props.HttpResponse) { return nil, nil } return nil, err @@ -56,10 +59,10 @@ func (w DataPlaneStorageQueueWrapper) Get(ctx context.Context, _, accountName, q }, nil } -func (w DataPlaneStorageQueueWrapper) GetServiceProperties(ctx context.Context, resourceGroup, accountName string) (*queues.StorageServiceProperties, error) { - serviceProps, err := w.client.GetServiceProperties(ctx, accountName) +func (w DataPlaneStorageQueueWrapper) GetServiceProperties(ctx context.Context) (*queues.StorageServiceProperties, error) { + serviceProps, err := w.client.GetServiceProperties(ctx) if err != nil { - if utils.ResponseWasNotFound(serviceProps.Response) { + if response.WasNotFound(serviceProps.HttpResponse) { return nil, nil } return nil, err @@ -68,12 +71,18 @@ func (w DataPlaneStorageQueueWrapper) GetServiceProperties(ctx context.Context, return &serviceProps.StorageServiceProperties, nil } -func (w DataPlaneStorageQueueWrapper) 
UpdateMetaData(ctx context.Context, _, accountName, queueName string, metaData map[string]string) error { - _, err := w.client.SetMetaData(ctx, accountName, queueName, metaData) +func (w DataPlaneStorageQueueWrapper) UpdateMetaData(ctx context.Context, queueName string, metaData map[string]string) error { + input := queues.SetMetaDataInput{ + MetaData: metaData, + } + _, err := w.client.SetMetaData(ctx, queueName, input) return err } -func (w DataPlaneStorageQueueWrapper) UpdateServiceProperties(ctx context.Context, _, accountName string, properties queues.StorageServiceProperties) error { - _, err := w.client.SetServiceProperties(ctx, accountName, properties) +func (w DataPlaneStorageQueueWrapper) UpdateServiceProperties(ctx context.Context, properties queues.StorageServiceProperties) error { + input := queues.SetStorageServicePropertiesInput{ + Properties: properties, + } + _, err := w.client.SetServiceProperties(ctx, input) return err } diff --git a/internal/services/storage/shim/shares.go b/internal/services/storage/shim/shares.go index 702e34b492b2e..375595520e300 100644 --- a/internal/services/storage/shim/shares.go +++ b/internal/services/storage/shim/shares.go @@ -6,18 +6,18 @@ package shim import ( "context" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" ) type StorageShareWrapper interface { - Create(ctx context.Context, resourceGroup, accountName, shareName string, input shares.CreateInput) error - Delete(ctx context.Context, resourceGroup, accountName, shareName string) error - Exists(ctx context.Context, resourceGroup, accountName, shareName string) (*bool, error) - Get(ctx context.Context, resourceGroup, accountName, shareName string) (*StorageShareProperties, error) - UpdateACLs(ctx context.Context, resourceGroup, accountName, shareName string, acls []shares.SignedIdentifier) error - UpdateMetaData(ctx context.Context, resourceGroup, accountName, shareName string, metaData map[string]string) error - UpdateQuota(ctx context.Context, resourceGroup, accountName, shareName string, quotaGB int) error - UpdateTier(ctx context.Context, resourceGroup, accountName, shareName string, tier shares.AccessTier) error + Create(ctx context.Context, shareName string, input shares.CreateInput) error + Delete(ctx context.Context, shareName string) error + Exists(ctx context.Context, shareName string) (*bool, error) + Get(ctx context.Context, shareName string) (*StorageShareProperties, error) + UpdateACLs(ctx context.Context, shareName string, input shares.SetAclInput) error + UpdateMetaData(ctx context.Context, shareName string, metaData map[string]string) error + UpdateQuota(ctx context.Context, shareName string, quotaGB int) error + UpdateTier(ctx context.Context, shareName string, tier shares.AccessTier) error } type StorageShareProperties struct { diff --git a/internal/services/storage/shim/shares_data_plane.go b/internal/services/storage/shim/shares_data_plane.go index ae88b650cd55e..a251059fd1b3f 100644 --- a/internal/services/storage/shim/shares_data_plane.go +++ b/internal/services/storage/shim/shares_data_plane.go @@ -6,12 +6,10 @@ package shim import ( "context" "fmt" - "strings" - "time" - "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + 
"github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" ) type DataPlaneStorageShareWrapper struct { @@ -24,66 +22,43 @@ func NewDataPlaneStorageShareWrapper(client *shares.Client) StorageShareWrapper } } -func (w DataPlaneStorageShareWrapper) Create(ctx context.Context, _, accountName, shareName string, input shares.CreateInput) error { - timeout, ok := ctx.Deadline() - if !ok { - return fmt.Errorf("context is missing a timeout") +func (w DataPlaneStorageShareWrapper) Create(ctx context.Context, shareName string, input shares.CreateInput) error { + if _, err := w.client.Create(ctx, shareName, input); err != nil { + return fmt.Errorf("creating share: %+v", err) } - - resp, err := w.client.Create(ctx, accountName, shareName, input) - if err == nil { - return nil - } - - // If we fail due to previous delete still in progress, then we can retry - if utils.ResponseWasConflict(resp) && strings.Contains(err.Error(), "ShareBeingDeleted") { - stateConf := &pluginsdk.StateChangeConf{ - Pending: []string{"waitingOnDelete"}, - Target: []string{"succeeded"}, - Refresh: w.createRefreshFunc(ctx, accountName, shareName, input), - PollInterval: 10 * time.Second, - NotFoundChecks: 180, - Timeout: time.Until(timeout), - } - - _, err := stateConf.WaitForStateContext(ctx) - return err - } - - // otherwise it's a legit error, so raise it - return err + return nil } -func (w DataPlaneStorageShareWrapper) Delete(ctx context.Context, _, accountName, shareName string) error { - deleteSnapshots := true - _, err := w.client.Delete(ctx, accountName, shareName, deleteSnapshots) +func (w DataPlaneStorageShareWrapper) Delete(ctx context.Context, shareName string) error { + input := shares.DeleteInput{ + DeleteSnapshots: true, + } + _, err := w.client.Delete(ctx, shareName, input) return err } -func (w DataPlaneStorageShareWrapper) Exists(ctx context.Context, _, accountName, shareName string) (*bool, error) { - existing, err := w.client.GetProperties(ctx, accountName, shareName) +func (w DataPlaneStorageShareWrapper) Exists(ctx context.Context, shareName string) (*bool, error) { + existing, err := w.client.GetProperties(ctx, shareName) if err != nil { - if utils.ResponseWasNotFound(existing.Response) { - return nil, nil + if response.WasNotFound(existing.HttpResponse) { + return pointer.To(false), nil } - return nil, err } - - return utils.Bool(true), nil + return pointer.To(true), nil } -func (w DataPlaneStorageShareWrapper) Get(ctx context.Context, _, accountName, shareName string) (*StorageShareProperties, error) { - props, err := w.client.GetProperties(ctx, accountName, shareName) +func (w DataPlaneStorageShareWrapper) Get(ctx context.Context, shareName string) (*StorageShareProperties, error) { + props, err := w.client.GetProperties(ctx, shareName) if err != nil { - if utils.ResponseWasNotFound(props.Response) { + if response.WasNotFound(props.HttpResponse) { return nil, nil } return nil, err } - acls, err := w.client.GetACL(ctx, accountName, shareName) + acls, err := w.client.GetACL(ctx, shareName) if err != nil { return nil, err } @@ -97,44 +72,30 @@ func (w DataPlaneStorageShareWrapper) Get(ctx context.Context, _, accountName, s }, nil } -func (w DataPlaneStorageShareWrapper) UpdateACLs(ctx context.Context, _, accountName, shareName string, acls []shares.SignedIdentifier) error { - _, err := w.client.SetACL(ctx, accountName, shareName, acls) +func (w DataPlaneStorageShareWrapper) UpdateACLs(ctx context.Context, shareName string, input 
shares.SetAclInput) error { + _, err := w.client.SetACL(ctx, shareName, input) return err } -func (w DataPlaneStorageShareWrapper) UpdateMetaData(ctx context.Context, _, accountName, shareName string, metaData map[string]string) error { - _, err := w.client.SetMetaData(ctx, accountName, shareName, metaData) +func (w DataPlaneStorageShareWrapper) UpdateMetaData(ctx context.Context, shareName string, metaData map[string]string) error { + input := shares.SetMetaDataInput{ + MetaData: metaData, + } + _, err := w.client.SetMetaData(ctx, shareName, input) return err } -func (w DataPlaneStorageShareWrapper) UpdateQuota(ctx context.Context, _, accountName, shareName string, quotaGB int) error { - _, err := w.client.SetProperties(ctx, accountName, shareName, shares.ShareProperties{ +func (w DataPlaneStorageShareWrapper) UpdateQuota(ctx context.Context, shareName string, quotaGB int) error { + _, err := w.client.SetProperties(ctx, shareName, shares.ShareProperties{ QuotaInGb: &quotaGB, }) return err } -func (w DataPlaneStorageShareWrapper) UpdateTier(ctx context.Context, _, accountname, shareName string, tier shares.AccessTier) error { +func (w DataPlaneStorageShareWrapper) UpdateTier(ctx context.Context, shareName string, tier shares.AccessTier) error { props := shares.ShareProperties{ AccessTier: &tier, } - _, err := w.client.SetProperties(ctx, accountname, shareName, props) + _, err := w.client.SetProperties(ctx, shareName, props) return err } - -func (w DataPlaneStorageShareWrapper) createRefreshFunc(ctx context.Context, accountName string, shareName string, input shares.CreateInput) pluginsdk.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := w.client.Create(ctx, accountName, shareName, input) - if err != nil { - if !utils.ResponseWasConflict(resp) { - return nil, "", err - } - - if utils.ResponseWasConflict(resp) && strings.Contains(err.Error(), "ShareBeingDeleted") { - return nil, "waitingOnDelete", nil - } - } - - return "succeeded", "succeeded", nil - } -} diff --git a/internal/services/storage/shim/tables.go b/internal/services/storage/shim/tables.go index f928d83fcea03..0734e971a0465 100644 --- a/internal/services/storage/shim/tables.go +++ b/internal/services/storage/shim/tables.go @@ -6,13 +6,13 @@ package shim import ( "context" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/tables" ) type StorageTableWrapper interface { - Create(ctx context.Context, resourceGroup string, accountName string, tableName string) error - Delete(ctx context.Context, resourceGroup string, accountName string, tableName string) error - Exists(ctx context.Context, resourceGroup string, accountName string, tableName string) (*bool, error) - GetACLs(ctx context.Context, resourceGroup string, accountName string, tableName string) (*[]tables.SignedIdentifier, error) - UpdateACLs(ctx context.Context, resourceGroup string, accountName string, tableName string, acls []tables.SignedIdentifier) error + Create(ctx context.Context, tableName string) error + Delete(ctx context.Context, tableName string) error + Exists(ctx context.Context, tableName string) (*bool, error) + GetACLs(ctx context.Context, tableName string) (*[]tables.SignedIdentifier, error) + UpdateACLs(ctx context.Context, tableName string, acls []tables.SignedIdentifier) error } diff --git a/internal/services/storage/shim/tables_data_plane.go b/internal/services/storage/shim/tables_data_plane.go index f8a998f06069a..447fca8946776 100644 ---
a/internal/services/storage/shim/tables_data_plane.go +++ b/internal/services/storage/shim/tables_data_plane.go @@ -6,8 +6,9 @@ package shim import ( "context" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/tables" ) type DataPlaneStorageTableWrapper struct { @@ -20,31 +21,29 @@ func NewDataPlaneStorageTableWrapper(client *tables.Client) StorageTableWrapper } } -func (w DataPlaneStorageTableWrapper) Create(ctx context.Context, _, accountName, tableName string) error { - _, err := w.client.Create(ctx, accountName, tableName) +func (w DataPlaneStorageTableWrapper) Create(ctx context.Context, tableName string) error { + _, err := w.client.Create(ctx, tableName) return err } -func (w DataPlaneStorageTableWrapper) Delete(ctx context.Context, _, accountName, tableName string) error { - _, err := w.client.Delete(ctx, accountName, tableName) +func (w DataPlaneStorageTableWrapper) Delete(ctx context.Context, tableName string) error { + _, err := w.client.Delete(ctx, tableName) return err } -func (w DataPlaneStorageTableWrapper) Exists(ctx context.Context, _, accountName, tableName string) (*bool, error) { - existing, err := w.client.Exists(ctx, accountName, tableName) +func (w DataPlaneStorageTableWrapper) Exists(ctx context.Context, tableName string) (*bool, error) { + existing, err := w.client.Exists(ctx, tableName) if err != nil { - if utils.ResponseWasNotFound(existing) { - return nil, nil + if response.WasNotFound(existing.HttpResponse) { + return pointer.To(false), nil } - return nil, err } - - return utils.Bool(true), nil + return pointer.To(true), nil } -func (w DataPlaneStorageTableWrapper) GetACLs(ctx context.Context, _, accountName, tableName string) (*[]tables.SignedIdentifier, error) { - acls, err := w.client.GetACL(ctx, accountName, tableName) +func (w DataPlaneStorageTableWrapper) GetACLs(ctx context.Context, tableName string) (*[]tables.SignedIdentifier, error) { + acls, err := w.client.GetACL(ctx, tableName) if err != nil { return nil, err } @@ -52,7 +51,7 @@ func (w DataPlaneStorageTableWrapper) GetACLs(ctx context.Context, _, accountNam return &acls.SignedIdentifiers, nil } -func (w DataPlaneStorageTableWrapper) UpdateACLs(ctx context.Context, _, accountName, tableName string, acls []tables.SignedIdentifier) error { - _, err := w.client.SetACL(ctx, accountName, tableName, acls) +func (w DataPlaneStorageTableWrapper) UpdateACLs(ctx context.Context, tableName string, acls []tables.SignedIdentifier) error { + _, err := w.client.SetACL(ctx, tableName, acls) return err } diff --git a/internal/services/storage/storage_account_resource.go b/internal/services/storage/storage_account_resource.go index 49eb2adb7528a..42b28436098ca 100644 --- a/internal/services/storage/storage_account_resource.go +++ b/internal/services/storage/storage_account_resource.go @@ -39,8 +39,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/accounts" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/queue/queues" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + 
"github.com/tombuildsstuff/giovanni/storage/2023-11-03/queue/queues" ) var ( @@ -1303,7 +1303,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e existing, err := client.GetProperties(ctx, id.ResourceGroupName, id.StorageAccountName, "") if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing %s: %s", id, err) + return fmt.Errorf("checking for existing %s: %s", id, err) } } @@ -1566,16 +1566,15 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e if !supportLevel.supportQueue { return fmt.Errorf("`queue_properties` aren't supported for account kind %q in sku tier %q", accountKind, accountTier) } - storageClient := meta.(*clients.Client).Storage - account, err := storageClient.FindAccount(ctx, id.StorageAccountName) + accountDetails, err := storageClient.FindAccount(ctx, id.StorageAccountName) if err != nil { return fmt.Errorf("retrieving %s: %+v", id, err) } - if account == nil { + if accountDetails == nil { return fmt.Errorf("unable to locate %q", id) } - queueClient, err := storageClient.QueuesClient(ctx, *account) + queueClient, err := storageClient.QueuesDataPlaneClient(ctx, *accountDetails, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Queues Client: %s", err) } @@ -1585,7 +1584,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e return fmt.Errorf("expanding `queue_properties`: %+v", err) } - if err = queueClient.UpdateServiceProperties(ctx, id.ResourceGroupName, id.StorageAccountName, queueProperties); err != nil { + if err = queueClient.UpdateServiceProperties(ctx, queueProperties); err != nil { return fmt.Errorf("updating Queue Properties: %+v", err) } } @@ -1622,7 +1621,6 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e if !supportLevel.supportStaticWebsite { return fmt.Errorf("`static_website` aren't supported for account kind %q in sku tier %q", accountKind, accountTier) } - storageClient := meta.(*clients.Client).Storage account, err := storageClient.FindAccount(ctx, id.StorageAccountName) if err != nil { @@ -1632,7 +1630,7 @@ func resourceStorageAccountCreate(d *pluginsdk.ResourceData, meta interface{}) e return fmt.Errorf("unable to locate %s", id) } - accountsClient, err := storageClient.AccountsDataPlaneClient(ctx, *account) + accountsClient, err := storageClient.AccountsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Accounts Data Plane Client: %s", err) } @@ -2044,7 +2042,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e return fmt.Errorf("unable to locate %s", *id) } - queueClient, err := storageClient.QueuesClient(ctx, *account) + queueClient, err := storageClient.QueuesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Queues Client: %s", err) } @@ -2054,7 +2052,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e return fmt.Errorf("expanding `queue_properties` for %s: %+v", *id, err) } - if err = queueClient.UpdateServiceProperties(ctx, account.ResourceGroup, id.StorageAccountName, queueProperties); err != nil { + if err = queueClient.UpdateServiceProperties(ctx, queueProperties); err != nil { return fmt.Errorf("updating Queue Properties for %s: %+v", *id, err) } } @@ -2099,7 
+2097,7 @@ func resourceStorageAccountUpdate(d *pluginsdk.ResourceData, meta interface{}) e return fmt.Errorf("unable to locate %s", *id) } - accountsClient, err := storageClient.AccountsDataPlaneClient(ctx, *account) + accountsClient, err := storageClient.AccountsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Data Plane client for %s: %+v", *id, err) } @@ -2378,12 +2376,12 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err } if supportLevel.supportQueue { - queueClient, err := storageClient.QueuesClient(ctx, *account) + queueClient, err := storageClient.QueuesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Queues Client: %s", err) } - queueProps, err := queueClient.GetServiceProperties(ctx, id.ResourceGroupName, id.StorageAccountName) + queueProps, err := queueClient.GetServiceProperties(ctx) if err != nil { return fmt.Errorf("retrieving queue properties for %s: %+v", *id, err) } @@ -2407,13 +2405,7 @@ func resourceStorageAccountRead(d *pluginsdk.ResourceData, meta interface{}) err } if supportLevel.supportStaticWebsite { - storageClient := meta.(*clients.Client).Storage - account, err := storageClient.FindAccount(ctx, id.StorageAccountName) - if err != nil { - return fmt.Errorf("retrieving %s: %+v", *id, err) - } - - accountsClient, err := storageClient.AccountsDataPlaneClient(ctx, *account) + accountsClient, err := storageClient.AccountsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Accounts Data Plane Client: %s", err) } @@ -3708,18 +3700,16 @@ func flattenedSharePropertiesSMB(input *storage.SmbSetting) []interface{} { } func flattenStaticWebsiteProperties(input accounts.GetServicePropertiesResult) []interface{} { - if storageServiceProps := input.StorageServiceProperties; storageServiceProps != nil { - if staticWebsite := storageServiceProps.StaticWebsite; staticWebsite != nil { - if !staticWebsite.Enabled { - return []interface{}{} - } + if staticWebsite := input.StaticWebsite; staticWebsite != nil { + if !staticWebsite.Enabled { + return []interface{}{} + } - return []interface{}{ - map[string]interface{}{ - "index_document": staticWebsite.IndexDocument, - "error_404_document": staticWebsite.ErrorDocument404Path, - }, - } + return []interface{}{ + map[string]interface{}{ + "index_document": staticWebsite.IndexDocument, + "error_404_document": staticWebsite.ErrorDocument404Path, + }, } } return []interface{}{} diff --git a/internal/services/storage/storage_blob_data_source.go b/internal/services/storage/storage_blob_data_source.go index cbe1c2a1b6557..86e36c35afd47 100644 --- a/internal/services/storage/storage_blob_data_source.go +++ b/internal/services/storage/storage_blob_data_source.go @@ -5,6 +5,7 @@ package storage import ( "fmt" + "github.com/hashicorp/go-azure-helpers/lang/response" "log" "strings" "time" @@ -12,8 +13,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + 
"github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" ) func dataSourceStorageBlob() *pluginsdk.Resource { @@ -82,28 +83,33 @@ func dataSourceStorageBlobRead(d *pluginsdk.ResourceData, meta interface{}) erro account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", accountName, name, containerName, err) + return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %v", accountName, name, containerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("locating Storage Account %q", accountName) } - blobsClient, err := storageClient.BlobsClient(ctx, *account) + blobsClient, err := storageClient.BlobsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Blobs Client: %s", err) + return fmt.Errorf("building Blobs Client: %v", err) } - id := blobsClient.GetResourceID(accountName, containerName, name) + accountId, err := accounts.ParseAccountID(accountName, storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := blobs.NewBlobID(*accountId, containerName, name) - log.Printf("[INFO] Retrieving Storage Blob %q (Container %q / Account %q).", name, containerName, accountName) + log.Printf("[INFO] Retrieving %s", id) input := blobs.GetPropertiesInput{} - props, err := blobsClient.GetProperties(ctx, accountName, containerName, name, input) + props, err := blobsClient.GetProperties(ctx, containerName, name, input) if err != nil { - if utils.ResponseWasNotFound(props.Response) { - return fmt.Errorf("the Blob %q was not found in Container %q / Account %q", name, containerName, accountName) + if response.WasNotFound(props.HttpResponse) { + return fmt.Errorf("%s was not found", id) } - return fmt.Errorf("retrieving properties for Blob %q (Container %q / Account %q): %s", name, containerName, accountName, err) + return fmt.Errorf("retrieving properties for %s: %v", id, err) } d.Set("name", name) @@ -125,7 +131,7 @@ func dataSourceStorageBlobRead(d *pluginsdk.ResourceData, meta interface{}) erro d.Set("type", strings.TrimSuffix(string(props.BlobType), "Blob")) - d.SetId(id) + d.SetId(id.ID()) d.Set("url", id) diff --git a/internal/services/storage/storage_blob_resource.go b/internal/services/storage/storage_blob_resource.go index d7e5e837a0129..6176192e5ba7a 100644 --- a/internal/services/storage/storage_blob_resource.go +++ b/internal/services/storage/storage_blob_resource.go @@ -10,15 +10,18 @@ import ( "strings" "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + 
"github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" ) func resourceStorageBlob() *pluginsdk.Resource { @@ -33,8 +36,8 @@ func resourceStorageBlob() *pluginsdk.Resource { 0: migration.BlobV0ToV1{}, }), - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := blobs.ParseResourceID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := blobs.ParseBlobID(id, storageDomainSuffix) return err }), @@ -175,28 +178,34 @@ func resourceStorageBlobCreate(d *pluginsdk.ResourceData, meta interface{}) erro account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", accountName, name, containerName, err) + return fmt.Errorf("retrieving Storage Account %q for Blob %q (Container %q): %v", accountName, name, containerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("locating Storage Account %q", accountName) } - blobsClient, err := storageClient.BlobsClient(ctx, *account) + blobsClient, err := storageClient.BlobsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Blobs Client: %s", err) + return fmt.Errorf("building Blobs Client: %v", err) } - id := blobsClient.GetResourceID(accountName, containerName, name) + accountId := accounts.AccountId{ + AccountName: accountName, + DomainSuffix: storageClient.StorageDomainSuffix, + SubDomainType: accounts.BlobSubDomainType, + } + + id := blobs.NewBlobID(accountId, containerName, name) if d.IsNewResource() { input := blobs.GetPropertiesInput{} - props, err := blobsClient.GetProperties(ctx, accountName, containerName, name, input) + props, err := blobsClient.GetProperties(ctx, containerName, name, input) if err != nil { - if !utils.ResponseWasNotFound(props.Response) { - return fmt.Errorf("checking if Blob %q exists (Container %q / Account %q / Resource Group %q): %s", name, containerName, accountName, account.ResourceGroup, err) + if !response.WasNotFound(props.HttpResponse) { + return fmt.Errorf("checking for existing %s: %v", id, err) } } - if !utils.ResponseWasNotFound(props.Response) { - return tf.ImportAsExistsError("azurerm_storage_blob", id) + if !response.WasNotFound(props.HttpResponse) { + return tf.ImportAsExistsError("azurerm_storage_blob", id.ID()) } } @@ -210,7 +219,7 @@ func resourceStorageBlobCreate(d *pluginsdk.ResourceData, meta interface{}) erro } } - log.Printf("[DEBUG] Creating Blob %q in Container %q within Storage Account %q..", name, containerName, accountName) + log.Printf("[DEBUG] Creating %s..", id) metaDataRaw := d.Get("metadata").(map[string]interface{}) blobInput := BlobUpload{ AccountName: accountName, @@ -229,12 +238,12 @@ func resourceStorageBlobCreate(d *pluginsdk.ResourceData, meta interface{}) erro SourceContent: d.Get("source_content").(string), SourceUri: d.Get("source_uri").(string), } - if err := blobInput.Create(ctx); err != nil { - return fmt.Errorf("creating Blob %q (Container %q / Account %q): %s", name, containerName, accountName, err) + if err = blobInput.Create(ctx); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } - log.Printf("[DEBUG] Created Blob %q in Container %q within Storage Account %q.", name, containerName, accountName) + log.Printf("[DEBUG] Created %s.", id) - d.SetId(id) + d.SetId(id.ID()) return resourceStorageBlobUpdate(d, meta) } @@ -244,69 
+253,69 @@ func resourceStorageBlobUpdate(d *pluginsdk.ResourceData, meta interface{}) erro ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := blobs.ParseResourceID(d.Id()) + id, err := blobs.ParseBlobID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { - return fmt.Errorf("parsing %q: %s", d.Id(), err) + return fmt.Errorf("parsing %q: %v", d.Id(), err) } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", id.AccountName, id.BlobName, id.ContainerName, err) + return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %v", id.AccountId.AccountName, id.BlobName, id.ContainerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) } - blobsClient, err := storageClient.BlobsClient(ctx, *account) + blobsClient, err := storageClient.BlobsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Blobs Client: %s", err) + return fmt.Errorf("building Blobs Client: %v", err) } if d.HasChange("content_type") || d.HasChange("cache_control") { - log.Printf("[DEBUG] Updating Properties for Blob %q (Container %q / Account %q)...", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[DEBUG] Updating Properties for %s...", id) input := blobs.SetPropertiesInput{ ContentType: utils.String(d.Get("content_type").(string)), CacheControl: utils.String(d.Get("cache_control").(string)), } - // `content_md5` is `ForceNew` but must be included in the `SetPropertiesInput` update payload or it will be zeroed on the blob. + // `content_md5` is `ForceNew` but must be included in the `SetPropertiesInput` update payload, or it will be zeroed on the blob. 
if contentMD5 := d.Get("content_md5").(string); contentMD5 != "" { data, err := convertHexToBase64Encoding(contentMD5) if err != nil { - return fmt.Errorf("in converting hex to base64 encoding for content_md5: %s", err) + return fmt.Errorf("converting hex to base64 encoding for content_md5: %v", err) } input.ContentMD5 = utils.String(data) } - if _, err := blobsClient.SetProperties(ctx, id.AccountName, id.ContainerName, id.BlobName, input); err != nil { - return fmt.Errorf("updating Properties for Blob %q (Container %q / Account %q): %s", id.BlobName, id.ContainerName, id.AccountName, err) + if _, err = blobsClient.SetProperties(ctx, id.ContainerName, id.BlobName, input); err != nil { + return fmt.Errorf("updating Properties for %s: %v", id, err) } - log.Printf("[DEBUG] Updated Properties for Blob %q (Container %q / Account %q).", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[DEBUG] Updated Properties for %s", id) } if d.HasChange("metadata") { - log.Printf("[DEBUG] Updating MetaData for Blob %q (Container %q / Account %q)...", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[DEBUG] Updating MetaData for %s...", id) metaDataRaw := d.Get("metadata").(map[string]interface{}) input := blobs.SetMetaDataInput{ MetaData: ExpandMetaData(metaDataRaw), } - if _, err := blobsClient.SetMetaData(ctx, id.AccountName, id.ContainerName, id.BlobName, input); err != nil { - return fmt.Errorf("updating MetaData for Blob %q (Container %q / Account %q): %s", id.BlobName, id.ContainerName, id.AccountName, err) + if _, err = blobsClient.SetMetaData(ctx, id.ContainerName, id.BlobName, input); err != nil { + return fmt.Errorf("updating MetaData for %s: %v", id, err) } - log.Printf("[DEBUG] Updated MetaData for Blob %q (Container %q / Account %q).", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[DEBUG] Updated MetaData for %s", id) } if d.HasChange("access_tier") { // this is only applicable for Gen2/BlobStorage accounts - log.Printf("[DEBUG] Updating Access Tier for Blob %q (Container %q / Account %q)...", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[DEBUG] Updating Access Tier for %s...", id) accessTier := blobs.AccessTier(d.Get("access_tier").(string)) - if _, err := blobsClient.SetTier(ctx, id.AccountName, id.ContainerName, id.BlobName, accessTier); err != nil { - return fmt.Errorf("updating Access Tier for Blob %q (Container %q / Account %q): %s", id.BlobName, id.ContainerName, id.AccountName, err) + if _, err := blobsClient.SetTier(ctx, id.ContainerName, id.BlobName, blobs.SetTierInput{Tier: accessTier}); err != nil { + return fmt.Errorf("updating Access Tier for %s: %v", id, err) } - log.Printf("[DEBUG] Updated Access Tier for Blob %q (Container %q / Account %q).", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[DEBUG] Updated Access Tier for %s", id) } return resourceStorageBlobRead(d, meta) @@ -317,42 +326,42 @@ func resourceStorageBlobRead(d *pluginsdk.ResourceData, meta interface{}) error ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := blobs.ParseResourceID(d.Id()) + id, err := blobs.ParseBlobID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { - return fmt.Errorf("parsing %q: %s", d.Id(), err) + return fmt.Errorf("parsing %q: %v", d.Id(), err) } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): 
%s", id.AccountName, id.BlobName, id.ContainerName, err) + return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %v", id.AccountId.AccountName, id.BlobName, id.ContainerName, err) } if account == nil { - log.Printf("[DEBUG] Unable to locate Account %q for Blob %q (Container %q) - assuming removed & removing from state!", id.AccountName, id.BlobName, id.ContainerName) + log.Printf("[DEBUG] Unable to locate Account %q for Blob %q (Container %q) - assuming removed & removing from state!", id.AccountId.AccountName, id.BlobName, id.ContainerName) d.SetId("") return nil } - blobsClient, err := storageClient.BlobsClient(ctx, *account) + blobsClient, err := storageClient.BlobsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Blobs Client: %s", err) + return fmt.Errorf("building Blobs Client: %v", err) } - log.Printf("[INFO] Retrieving Storage Blob %q (Container %q / Account %q).", id.BlobName, id.ContainerName, id.AccountName) + log.Printf("[INFO] Retrieving %s", id) input := blobs.GetPropertiesInput{} - props, err := blobsClient.GetProperties(ctx, id.AccountName, id.ContainerName, id.BlobName, input) + props, err := blobsClient.GetProperties(ctx, id.ContainerName, id.BlobName, input) if err != nil { - if utils.ResponseWasNotFound(props.Response) { - log.Printf("[INFO] Blob %q was not found in Container %q / Account %q - assuming removed & removing from state...", id.BlobName, id.ContainerName, id.AccountName) + if response.WasNotFound(props.HttpResponse) { + log.Printf("[INFO] Blob %q was not found in Container %q / Account %q - assuming removed & removing from state...", id.BlobName, id.ContainerName, id.AccountId.AccountName) d.SetId("") return nil } - return fmt.Errorf("retrieving properties for Blob %q (Container %q / Account %q): %s", id.BlobName, id.ContainerName, id.AccountName, err) + return fmt.Errorf("retrieving properties for %s: %v", id, err) } d.Set("name", id.BlobName) d.Set("storage_container_name", id.ContainerName) - d.Set("storage_account_name", id.AccountName) + d.Set("storage_account_name", id.AccountId.AccountName) d.Set("access_tier", string(props.AccessTier)) d.Set("content_type", props.ContentType) @@ -363,7 +372,7 @@ func resourceStorageBlobRead(d *pluginsdk.ResourceData, meta interface{}) error if props.ContentMD5 != "" { contentMD5, err = convertBase64ToHexEncoding(props.ContentMD5) if err != nil { - return fmt.Errorf("in converting hex to base64 encoding for content_md5: %s", err) + return fmt.Errorf("converting hex to base64 encoding for content_md5: %v", err) } } d.Set("content_md5", contentMD5) @@ -371,8 +380,8 @@ func resourceStorageBlobRead(d *pluginsdk.ResourceData, meta interface{}) error d.Set("type", strings.TrimSuffix(string(props.BlobType), "Blob")) d.Set("url", d.Id()) - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %+v", err) + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) } // The CopySource is only returned if the blob hasn't been modified (e.g. 
metadata configured etc) // as such, we need to conditionally set this to ensure it's trackable if possible @@ -388,30 +397,29 @@ func resourceStorageBlobDelete(d *pluginsdk.ResourceData, meta interface{}) erro ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := blobs.ParseResourceID(d.Id()) + id, err := blobs.ParseBlobID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { - return fmt.Errorf("parsing %q: %s", d.Id(), err) + return fmt.Errorf("parsing %q: %v", d.Id(), err) } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", id.AccountName, id.BlobName, id.ContainerName, err) + return fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %s", id.AccountId.AccountName, id.BlobName, id.ContainerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) } - blobsClient, err := storageClient.BlobsClient(ctx, *account) + blobsClient, err := storageClient.BlobsDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Blobs Client: %s", err) + return fmt.Errorf("building Blobs Client: %v", err) } - log.Printf("[INFO] Deleting Blob %q from Container %q / Storage Account %q", id.BlobName, id.ContainerName, id.AccountName) input := blobs.DeleteInput{ DeleteSnapshots: true, } - if _, err := blobsClient.Delete(ctx, id.AccountName, id.ContainerName, id.BlobName, input); err != nil { - return fmt.Errorf("deleting Blob %q (Container %q / Account %q): %s", id.BlobName, id.ContainerName, id.AccountName, err) + if _, err = blobsClient.Delete(ctx, id.ContainerName, id.BlobName, input); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil diff --git a/internal/services/storage/storage_blob_resource_test.go b/internal/services/storage/storage_blob_resource_test.go index 8f050af524f5d..9a0c6d3b2ba3f 100644 --- a/internal/services/storage/storage_blob_resource_test.go +++ b/internal/services/storage/storage_blob_resource_test.go @@ -10,13 +10,15 @@ import ( "os" "regexp" "testing" + "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/blobs" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/blobs" ) type StorageBlobResource struct{} @@ -469,56 +471,62 @@ func TestAccStorageBlob_archive(t *testing.T) { } func (r StorageBlobResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := blobs.ParseResourceID(state.ID) + id, err := blobs.ParseBlobID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } - account, err := client.Storage.FindAccount(ctx, id.AccountName) + account, err := client.Storage.FindAccount(ctx, id.AccountId.AccountName) if err != nil { return nil, err } if account == nil { - return nil, fmt.Errorf("unable to locate Account %q for 
Blob %q (Container %q)", id.AccountName, id.BlobName, id.ContainerName) + return nil, fmt.Errorf("unable to locate Account %q for Blob %q (Container %q)", id.AccountId.AccountName, id.BlobName, id.ContainerName) } - blobsClient, err := client.Storage.BlobsClient(ctx, *account) + blobsClient, err := client.Storage.BlobsDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Blobs Client: %+v", err) } input := blobs.GetPropertiesInput{} - resp, err := blobsClient.GetProperties(ctx, id.AccountName, id.ContainerName, id.BlobName, input) + resp, err := blobsClient.GetProperties(ctx, id.ContainerName, id.BlobName, input) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } - return nil, fmt.Errorf("retrieving Blob %q (Container %q / Account %q): %+v", id.BlobName, id.ContainerName, id.AccountName, err) + return nil, fmt.Errorf("retrieving Blob %q (Container %q / Account %q): %+v", id.BlobName, id.ContainerName, id.AccountId.AccountName, err) } return utils.Bool(true), nil } func (r StorageBlobResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := blobs.ParseResourceID(state.ID) + id, err := blobs.ParseBlobID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } - account, err := client.Storage.FindAccount(ctx, id.AccountName) + account, err := client.Storage.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return nil, fmt.Errorf("retrievign Account %q for Blob %q (Container %q): %+v", id.AccountName, id.BlobName, id.ContainerName, err) + return nil, fmt.Errorf("retrieving Account %q for Blob %q (Container %q): %+v", id.AccountId.AccountName, id.BlobName, id.ContainerName, err) } - blobsClient, err := client.Storage.BlobsClient(ctx, *account) + blobsClient, err := client.Storage.BlobsDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Blobs Client: %+v", err) } input := blobs.DeleteInput{ DeleteSnapshots: false, } - if _, err := blobsClient.Delete(ctx, id.AccountName, id.ContainerName, id.BlobName, input); err != nil { - return nil, fmt.Errorf("deleting Blob %q (Container %q / Account %q): %+v", id.BlobName, id.ContainerName, id.AccountName, err) + if _, err = blobsClient.Delete(ctx, id.ContainerName, id.BlobName, input); err != nil { + return nil, fmt.Errorf("deleting Blob %q (Container %q / Account %q): %+v", id.BlobName, id.ContainerName, id.AccountId.AccountName, err) } return utils.Bool(true), nil } func (r StorageBlobResource) blobMatchesFile(kind blobs.BlobType, filePath string) func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { return func(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) error { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, time.Now().Add(10*time.Minute)) + defer cancel() + } + name := state.Attributes["name"] containerName := state.Attributes["storage_container_name"] accountName := state.Attributes["storage_account_name"] @@ -531,14 +539,14 @@ func (r StorageBlobResource) blobMatchesFile(kind blobs.BlobType, filePath strin return fmt.Errorf("Unable to locate Storage Account %q!", accountName) } - client, err := clients.Storage.BlobsClient(ctx, *account) + client, err := 
clients.Storage.BlobsDataPlaneClient(ctx, *account, clients.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Blobs Client: %s", err) } // first check the type getPropsInput := blobs.GetPropertiesInput{} - props, err := client.GetProperties(ctx, accountName, containerName, name, getPropsInput) + props, err := client.GetProperties(ctx, containerName, name, getPropsInput) if err != nil { return fmt.Errorf("retrieving Properties for Blob %q (Container %q): %s", name, containerName, err) } @@ -549,20 +557,24 @@ func (r StorageBlobResource) blobMatchesFile(kind blobs.BlobType, filePath strin // then compare the content itself getInput := blobs.GetInput{} - actualProps, err := client.Get(ctx, accountName, containerName, name, getInput) + actualProps, err := client.Get(ctx, containerName, name, getInput) if err != nil { return fmt.Errorf("retrieving Blob %q (Container %q): %s", name, containerName, err) } actualContents := actualProps.Contents + if actualContents == nil { + return fmt.Errorf("Bad: Storage Blob %q (storage container: %q) returned nil contents", name, containerName) + } + // local file for comparison expectedContents, err := os.ReadFile(filePath) if err != nil { return err } - if string(actualContents) != string(expectedContents) { + if string(*actualContents) != string(expectedContents) { return fmt.Errorf("Bad: Storage Blob %q (storage container: %q) does not match contents", name, containerName) } diff --git a/internal/services/storage/storage_container_data_source.go b/internal/services/storage/storage_container_data_source.go index dc7a74da8ef0c..a8988bbd8fe1e 100644 --- a/internal/services/storage/storage_container_data_source.go +++ b/internal/services/storage/storage_container_data_source.go @@ -69,34 +69,35 @@ func dataSourceStorageContainerRead(d *pluginsdk.ResourceData, meta interface{}) account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %s", accountName, containerName, err) + return fmt.Errorf("retrieving Storage Account %q for Container %q: %v", accountName, containerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Account %q for Storage Container %q", accountName, containerName) + return fmt.Errorf("locating Storage Account %q for Container %q", accountName, containerName) } - client, err := storageClient.ContainersClient(ctx, *account) + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Containers Client for Storage Account %q (Resource Group %q): %s", accountName, account.ResourceGroup, err) + return fmt.Errorf("building Containers Client: %v", err) } - id := parse.NewStorageContainerDataPlaneId(accountName, storageClient.Environment.StorageEndpointSuffix, containerName).ID() - d.SetId(id) + id := parse.NewStorageContainerDataPlaneId(accountName, storageClient.AzureEnvironment.StorageEndpointSuffix, containerName) - props, err := client.Get(ctx, account.ResourceGroup, accountName, containerName) + props, err := client.Get(ctx, containerName) if err != nil { - return fmt.Errorf("retrieving Container %q (Account %q / Resource Group %q): %s", containerName, accountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } if props == nil { - return fmt.Errorf("Container %q was not found in Account %q / Resource Group %q", containerName, accountName, 
account.ResourceGroup) + return fmt.Errorf("retrieving %s: result was nil", id) } + d.SetId(id.ID()) + d.Set("name", containerName) d.Set("storage_account_name", accountName) d.Set("container_access_type", flattenStorageContainerAccessLevel(props.AccessLevel)) - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %+v", err) + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) } d.Set("has_immutability_policy", props.HasImmutabilityPolicy) diff --git a/internal/services/storage/storage_container_resource.go b/internal/services/storage/storage_container_resource.go index c4dc99de623e2..7237d42331def 100644 --- a/internal/services/storage/storage_container_resource.go +++ b/internal/services/storage/storage_container_resource.go @@ -11,13 +11,14 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/blob/containers" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/containers" ) func resourceStorageContainer() *pluginsdk.Resource { @@ -27,8 +28,8 @@ func resourceStorageContainer() *pluginsdk.Resource { Delete: resourceStorageContainerDelete, Update: resourceStorageContainerUpdate, - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.StorageContainerDataPlaneID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := parse.StorageContainerDataPlaneID(id, storageDomainSuffix) return err }), @@ -106,37 +107,39 @@ func resourceStorageContainerCreate(d *pluginsdk.ResourceData, meta interface{}) account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %s", accountName, containerName, err) + return fmt.Errorf("retrieving Account %q for Container %q: %v", accountName, containerName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("locating Storage Account %q", accountName) } - client, err := storageClient.ContainersClient(ctx, *account) + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building storage client: %+v", err) + return fmt.Errorf("building storage client: %v", err) } - id := parse.NewStorageContainerDataPlaneId(accountName, storageClient.Environment.StorageEndpointSuffix, containerName).ID() - exists, err := client.Exists(ctx, account.ResourceGroup, accountName, containerName) + id := parse.NewStorageContainerDataPlaneId(accountName, storageClient.AzureEnvironment.StorageEndpointSuffix, containerName) + + exists, err := 
client.Exists(ctx, containerName) if err != nil { - return err + return fmt.Errorf("checking for existing %s: %v", id, err) } if exists != nil && *exists { - return tf.ImportAsExistsError("azurerm_storage_container", id) + return tf.ImportAsExistsError("azurerm_storage_container", id.ID()) } - log.Printf("[INFO] Creating Container %q in Storage Account %q", containerName, accountName) + log.Printf("[INFO] Creating %s", id) input := containers.CreateInput{ AccessLevel: accessLevel, MetaData: metaData, } - if err := client.Create(ctx, account.ResourceGroup, accountName, containerName, input); err != nil { - return fmt.Errorf("failed creating container: %+v", err) + if err = client.Create(ctx, containerName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } - d.SetId(id) + d.SetId(id.ID()) + return resourceStorageContainerRead(d, meta) } @@ -145,45 +148,54 @@ func resourceStorageContainerUpdate(d *pluginsdk.ResourceData, meta interface{}) ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageContainerDataPlaneID(d.Id()) + id, err := parse.StorageContainerDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountName, id.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) - } - client, err := storageClient.ContainersClient(ctx, *account) - if err != nil { - return fmt.Errorf("building Containers Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } if d.HasChange("container_access_type") { - log.Printf("[DEBUG] Updating the Access Control for Container %q (Storage Account %q / Resource Group %q)..", id.Name, id.AccountName, account.ResourceGroup) + log.Printf("[DEBUG] Updating Access Level for %s...", id) + + // Updating the access level does not work with AAD authentication, returns a cryptic 404 + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) + if err != nil { + return fmt.Errorf("building Containers Client: %v", err) + } + accessLevelRaw := d.Get("container_access_type").(string) accessLevel := expandStorageContainerAccessLevel(accessLevelRaw) - if err := client.UpdateAccessLevel(ctx, account.ResourceGroup, id.AccountName, id.Name, accessLevel); err != nil { - return fmt.Errorf("updating the Access Control for Container %q (Storage Account %q / Resource Group %q): %s", id.Name, id.AccountName, account.ResourceGroup, err) + if err = client.UpdateAccessLevel(ctx, id.Name, accessLevel); err != nil { + return fmt.Errorf("updating Access Level for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the Access Control for Container %q (Storage Account %q / Resource Group %q)", id.Name, id.AccountName, account.ResourceGroup) + log.Printf("[DEBUG] Updated Access Level for %s", id) } if d.HasChange("metadata") { - log.Printf("[DEBUG] Updating the MetaData for Container %q (Storage Account %q / Resource Group %q)..", id.Name, id.AccountName, account.ResourceGroup) + log.Printf("[DEBUG] Updating Metadata for %s...", id) + + client, err := storageClient.ContainersDataPlaneClient(ctx, *account,
storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return fmt.Errorf("building Containers Client: %v", err) + } + metaDataRaw := d.Get("metadata").(map[string]interface{}) metaData := ExpandMetaData(metaDataRaw) - if err := client.UpdateMetaData(ctx, account.ResourceGroup, id.AccountName, id.Name, metaData); err != nil { - return fmt.Errorf("updating the MetaData for Container %q (Storage Account %q / Resource Group %q): %s", id.Name, id.AccountName, account.ResourceGroup, err) + if err = client.UpdateMetaData(ctx, id.Name, metaData); err != nil { + return fmt.Errorf("updating Metadata for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the MetaData for Container %q (Storage Account %q / Resource Group %q)", id.Name, id.AccountName, account.ResourceGroup) + log.Printf("[DEBUG] Updated Metadata for %s", id) } return resourceStorageContainerRead(d, meta) @@ -195,28 +207,29 @@ func resourceStorageContainerRead(d *pluginsdk.ResourceData, meta interface{}) e ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageContainerDataPlaneID(d.Id()) + id, err := parse.StorageContainerDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountName, id.Name, err) } if account == nil { log.Printf("[DEBUG] Unable to locate Account %q for Storage Container %q - assuming removed & removing from state", id.AccountName, id.Name) d.SetId("") return nil } - client, err := storageClient.ContainersClient(ctx, *account) + + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Containers Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building Containers Client: %v", err) } - props, err := client.Get(ctx, account.ResourceGroup, id.AccountName, id.Name) + props, err := client.Get(ctx, id.Name) if err != nil { - return fmt.Errorf("retrieving Container %q (Account %q / Resource Group %q): %s", id.Name, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } if props == nil { log.Printf("[DEBUG] Container %q was not found in Account %q / Resource Group %q - assuming removed & removing from state", id.Name, id.AccountName, account.ResourceGroup) @@ -229,8 +242,8 @@ func resourceStorageContainerRead(d *pluginsdk.ResourceData, meta interface{}) e d.Set("container_access_type", flattenStorageContainerAccessLevel(props.AccessLevel)) - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %+v", err) + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) } d.Set("has_immutability_policy", props.HasImmutabilityPolicy) @@ -247,25 +260,26 @@ func resourceStorageContainerDelete(d *pluginsdk.ResourceData, meta interface{}) ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageContainerDataPlaneID(d.Id()) + id, err := parse.StorageContainerDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := 
storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Container %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Container %q: %v", id.AccountName, id.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } - client, err := storageClient.ContainersClient(ctx, *account) + + client, err := storageClient.ContainersDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Containers Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building Containers Client: %v", err) } - if err := client.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { - return fmt.Errorf("deleting Container %q (Storage Account %q / Resource Group %q): %s", id.Name, id.AccountName, account.ResourceGroup, err) + if err = client.Delete(ctx, id.Name); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil diff --git a/internal/services/storage/storage_container_resource_test.go b/internal/services/storage/storage_container_resource_test.go index cf0a8c0ae07b5..37c51c0fe25e2 100644 --- a/internal/services/storage/storage_container_resource_test.go +++ b/internal/services/storage/storage_container_resource_test.go @@ -96,14 +96,14 @@ func TestAccStorageContainer_update(t *testing.T) { data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.update(data, "private"), + Config: r.update(data, "private", "yes"), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container_access_type").HasValue("private"), ), }, { - Config: r.update(data, "container"), + Config: r.update(data, "container", "no"), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), check.That(data.ResourceName).Key("container_access_type").HasValue("container"), @@ -187,7 +187,7 @@ func TestAccStorageContainer_web(t *testing.T) { } func (r StorageContainerResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageContainerDataPlaneID(state.ID) + id, err := parse.StorageContainerDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -199,11 +199,11 @@ func (r StorageContainerResource) Exists(ctx context.Context, client *clients.Cl return nil, fmt.Errorf("unable to locate Storage Account %q", id.AccountName) } - containersClient, err := client.Storage.ContainersClient(ctx, *account) + containersClient, err := client.Storage.ContainersDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Containers Client: %+v", err) } - prop, err := containersClient.Get(ctx, account.ResourceGroup, id.AccountName, id.Name) + prop, err := containersClient.Get(ctx, id.Name) if err != nil { return nil, fmt.Errorf("retrieving Container %q (Account %q / Resource Group %q): %+v", id.Name, id.AccountName, account.ResourceGroup, err) } @@ -211,7 +211,7 @@ func (r StorageContainerResource) Exists(ctx context.Context, client *clients.Cl } func (r StorageContainerResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := 
parse.StorageContainerDataPlaneID(state.ID) + id, err := parse.StorageContainerDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -222,11 +222,11 @@ func (r StorageContainerResource) Destroy(ctx context.Context, client *clients.C if account == nil { return nil, fmt.Errorf("unable to locate Storage Account %q", id.AccountName) } - containersClient, err := client.Storage.ContainersClient(ctx, *account) + containersClient, err := client.Storage.ContainersDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Containers Client: %+v", err) } - if err := containersClient.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { + if err := containersClient.Delete(ctx, id.Name); err != nil { return nil, fmt.Errorf("deleting Container %q (Account %q / Resource Group %q): %+v", id.Name, id.AccountName, account.ResourceGroup, err) } return utils.Bool(true), nil @@ -290,7 +290,7 @@ resource "azurerm_storage_container" "import" { `, template) } -func (r StorageContainerResource) update(data acceptance.TestData, accessType string) string { +func (r StorageContainerResource) update(data acceptance.TestData, accessType, metadataVal string) string { template := r.template(data) return fmt.Sprintf(` %s @@ -299,8 +299,12 @@ resource "azurerm_storage_container" "test" { name = "vhds" storage_account_name = azurerm_storage_account.test.name container_access_type = "%s" + metadata = { + foo = "bar" + test = "%s" + } } -`, template, accessType) +`, template, accessType, metadataVal) } func (r StorageContainerResource) metaData(data acceptance.TestData) string { diff --git a/internal/services/storage/storage_containers_data_source.go b/internal/services/storage/storage_containers_data_source.go index 40c5ec7ff49a8..dd82a159bc3f9 100644 --- a/internal/services/storage/storage_containers_data_source.go +++ b/internal/services/storage/storage_containers_data_source.go @@ -103,7 +103,7 @@ func (r storageContainersDataSource) Read() sdk.ResourceFunc { return fmt.Errorf("retrieving %s: %+v", id, err) } - plan.Containers = flattenStorageContainersContainers(resp.Items, id.StorageAccountName, metadata.Client.Storage.Environment.StorageEndpointSuffix, plan.NamePrefix) + plan.Containers = flattenStorageContainersContainers(resp.Items, id.StorageAccountName, metadata.Client.Storage.AzureEnvironment.StorageEndpointSuffix, plan.NamePrefix) if err := metadata.Encode(&plan); err != nil { return fmt.Errorf("encoding %s: %+v", id, err) diff --git a/internal/services/storage/storage_data_lake_gen2_filesystem_resource.go b/internal/services/storage/storage_data_lake_gen2_filesystem_resource.go index 4998f22fc693b..ab968ed009c1e 100644 --- a/internal/services/storage/storage_data_lake_gen2_filesystem_resource.go +++ b/internal/services/storage/storage_data_lake_gen2_filesystem_resource.go @@ -10,6 +10,7 @@ import ( "regexp" "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -18,8 +19,9 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/filesystems" - 
"github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/paths" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/filesystems" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/paths" "github.com/tombuildsstuff/giovanni/storage/accesscontrol" ) @@ -31,22 +33,22 @@ func resourceStorageDataLakeGen2FileSystem() *pluginsdk.Resource { Delete: resourceStorageDataLakeGen2FileSystemDelete, Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { - _, err := filesystems.ParseResourceID(id) + _, err := filesystems.ParseFileSystemID(id, "") // TODO: actual domain suffix needed here! return err }, func(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { storageClients := meta.(*clients.Client).Storage - id, err := filesystems.ParseResourceID(d.Id()) + id, err := filesystems.ParseFileSystemID(d.Id(), "") // TODO: actual domain suffix needed here! if err != nil { return []*pluginsdk.ResourceData{d}, fmt.Errorf("parsing ID %q for import of Data Lake Gen2 File System: %v", d.Id(), err) } // we then need to look up the Storage Account ID - account, err := storageClients.FindAccount(ctx, id.AccountName) + account, err := storageClients.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving Account %q for Data Lake Gen2 File System %q: %s", id.AccountName, id.DirectoryName, err) + return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving Account %q for Data Lake Gen2 File System %q: %s", id.AccountId.AccountName, id.FileSystemName, err) } if account == nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return []*pluginsdk.ResourceData{d}, fmt.Errorf("Unable to locate Storage Account %q!", id.AccountId.AccountName) } d.Set("storage_account_id", account.ID) @@ -127,31 +129,32 @@ func resourceStorageDataLakeGen2FileSystem() *pluginsdk.Resource { } func resourceStorageDataLakeGen2FileSystemCreate(d *pluginsdk.ResourceData, meta interface{}) error { - accountsClient := meta.(*clients.Client).Storage.AccountsClient - client := meta.(*clients.Client).Storage.FileSystemsClient - pathClient := meta.(*clients.Client).Storage.ADLSGen2PathsClient + storageClient := meta.(*clients.Client).Storage + accountsClient := storageClient.AccountsClient + client := storageClient.FileSystemsClient + pathClient := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - storageID, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) + storageId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { return err } - aceRaw := d.Get("ace").(*pluginsdk.Set).List() - acl, err := ExpandDataLakeGen2AceList(aceRaw) - if err != nil { - return fmt.Errorf("parsing ace list: %s", err) - } - // confirm the storage account exists, otherwise Data Plane API requests will fail - storageAccount, err := accountsClient.GetProperties(ctx, storageID.ResourceGroupName, storageID.StorageAccountName, "") + storageAccount, err := accountsClient.GetProperties(ctx, storageId.ResourceGroupName, storageId.StorageAccountName, "") if err != nil { if utils.ResponseWasNotFound(storageAccount.Response) { - return fmt.Errorf("%s was not found", storageID) + return fmt.Errorf("%s was not found", storageId) } - return 
fmt.Errorf("checking for existence of %s: %+v", storageID, err) + return fmt.Errorf("checking for existence of %s: %v", storageId, err) + } + + aceRaw := d.Get("ace").(*pluginsdk.Set).List() + acl, err := ExpandDataLakeGen2AceList(aceRaw) + if err != nil { + return fmt.Errorf("parsing ace list: %v", err) } if acl != nil && (storageAccount.AccountProperties == nil || @@ -164,25 +167,30 @@ func resourceStorageDataLakeGen2FileSystemCreate(d *pluginsdk.ResourceData, meta propertiesRaw := d.Get("properties").(map[string]interface{}) properties := ExpandMetaData(propertiesRaw) - id := client.GetResourceID(storageID.StorageAccountName, fileSystemName) + accountId, err := accounts.ParseAccountID(d.Get("storage_account_id").(string), storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := filesystems.NewFileSystemID(*accountId, fileSystemName) - resp, err := client.GetProperties(ctx, storageID.StorageAccountName, fileSystemName) + resp, err := client.GetProperties(ctx, fileSystemName) if err != nil { - if !utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("checking for existence of existing File System %q in %s: %+v", fileSystemName, storageID, err) + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("checking for existence of existing File System %q in %s: %v", fileSystemName, accountId, err) } } - if !utils.ResponseWasNotFound(resp.Response) { - return tf.ImportAsExistsError("azurerm_storage_data_lake_gen2_filesystem", id) + if !response.WasNotFound(resp.HttpResponse) { + return tf.ImportAsExistsError("azurerm_storage_data_lake_gen2_filesystem", id.ID()) } - log.Printf("[INFO] Creating File System %q in %s.", fileSystemName, storageID) + log.Printf("[INFO] Creating %s...", id) input := filesystems.CreateInput{ Properties: properties, } - if _, err := client.Create(ctx, storageID.StorageAccountName, fileSystemName, input); err != nil { - return fmt.Errorf("creating File System %q in %s: %+v", fileSystemName, storageID, err) + if _, err = client.Create(ctx, fileSystemName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } var owner *string @@ -199,7 +207,7 @@ func resourceStorageDataLakeGen2FileSystemCreate(d *pluginsdk.ResourceData, meta if acl != nil || owner != nil || group != nil { var aclString *string if acl != nil { - log.Printf("[INFO] Creating acl %q in File System %q in %s", acl, fileSystemName, storageID) + log.Printf("[INFO] Creating ACL %q for %s", acl, id) v := acl.String() aclString = &v } @@ -208,23 +216,24 @@ func resourceStorageDataLakeGen2FileSystemCreate(d *pluginsdk.ResourceData, meta Owner: owner, Group: group, } - if _, err := pathClient.SetAccessControl(ctx, storageID.StorageAccountName, fileSystemName, "/", accessControlInput); err != nil { - return fmt.Errorf("setting access control for root path in File System %q in %s: %+v", fileSystemName, storageID, err) + if _, err = pathClient.SetAccessControl(ctx, fileSystemName, "/", accessControlInput); err != nil { + return fmt.Errorf("setting access control for root path in File System %q in %s: %v", fileSystemName, accountId, err) } } - d.SetId(id) + d.SetId(id.ID()) return resourceStorageDataLakeGen2FileSystemRead(d, meta) } func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta interface{}) error { - accountsClient := meta.(*clients.Client).Storage.AccountsClient - client := meta.(*clients.Client).Storage.FileSystemsClient - pathClient := meta.(*clients.Client).Storage.ADLSGen2PathsClient + 
storageClient := meta.(*clients.Client).Storage + accountsClient := storageClient.AccountsClient + client := storageClient.FileSystemsClient + pathClient := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := filesystems.ParseResourceID(d.Id()) + id, err := filesystems.ParseFileSystemID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } @@ -237,7 +246,7 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta aceRaw := d.Get("ace").(*pluginsdk.Set).List() acl, err := ExpandDataLakeGen2AceList(aceRaw) if err != nil { - return fmt.Errorf("parsing ace list: %s", err) + return fmt.Errorf("parsing ace list: %v", err) } // confirm the storage account exists, otherwise Data Plane API requests will fail @@ -247,7 +256,7 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta return fmt.Errorf("%s was not found", storageId) } - return fmt.Errorf("checking for existence of %s: %+v", storageId, err) + return fmt.Errorf("checking for existence of %s: %v", storageId, err) } if acl != nil && (storageAccount.AccountProperties == nil || @@ -259,12 +268,12 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta propertiesRaw := d.Get("properties").(map[string]interface{}) properties := ExpandMetaData(propertiesRaw) - log.Printf("[INFO] Updating Properties for File System %q in Storage Account %q.", id.DirectoryName, id.AccountName) + log.Printf("[INFO] Updating Properties for %s...", id) input := filesystems.SetPropertiesInput{ Properties: properties, } - if _, err = client.SetProperties(ctx, id.AccountName, id.DirectoryName, input); err != nil { - return fmt.Errorf("updating Properties for File System %q in Storage Account %q: %s", id.DirectoryName, id.AccountName, err) + if _, err = client.SetProperties(ctx, id.FileSystemName, input); err != nil { + return fmt.Errorf("updating Properties for %s: %v", id, err) } var owner *string @@ -281,7 +290,7 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta if acl != nil || owner != nil || group != nil { var aclString *string if acl != nil { - log.Printf("[INFO] Creating acl %q in File System %q in Storage Account %q.", acl, id.DirectoryName, id.AccountName) + log.Printf("[INFO] Creating ACL %q for %s...", acl, id) v := acl.String() aclString = &v } @@ -290,8 +299,8 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta Owner: owner, Group: group, } - if _, err := pathClient.SetAccessControl(ctx, id.AccountName, id.DirectoryName, "/", accessControlInput); err != nil { - return fmt.Errorf("setting access control for root path in File System %q in Storage Account %q: %s", id.DirectoryName, id.AccountName, err) + if _, err = pathClient.SetAccessControl(ctx, id.FileSystemName, "/", accessControlInput); err != nil { + return fmt.Errorf("setting access control for root path in File System %q in Storage Account %q: %v", id.FileSystemName, id.AccountId.AccountName, err) } } @@ -299,49 +308,50 @@ func resourceStorageDataLakeGen2FileSystemUpdate(d *pluginsdk.ResourceData, meta } func resourceStorageDataLakeGen2FileSystemRead(d *pluginsdk.ResourceData, meta interface{}) error { - accountsClient := meta.(*clients.Client).Storage.AccountsClient - client := meta.(*clients.Client).Storage.FileSystemsClient - pathClient := meta.(*clients.Client).Storage.ADLSGen2PathsClient + storageClient := meta.(*clients.Client).Storage + 
accountsClient := storageClient.AccountsClient + client := storageClient.FileSystemsClient + pathClient := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := filesystems.ParseResourceID(d.Id()) + id, err := filesystems.ParseFileSystemID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - storageID, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) + storageId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { return err } // confirm the storage account exists, otherwise Data Plane API requests will fail - storageAccount, err := accountsClient.GetProperties(ctx, storageID.ResourceGroupName, storageID.StorageAccountName, "") + storageAccount, err := accountsClient.GetProperties(ctx, storageId.ResourceGroupName, storageId.StorageAccountName, "") if err != nil { if utils.ResponseWasNotFound(storageAccount.Response) { - log.Printf("[INFO] Storage Account %q does not exist removing from state...", id.AccountName) + log.Printf("[INFO] Storage Account %q does not exist removing from state...", id.AccountId.AccountName) d.SetId("") return nil } - return fmt.Errorf("checking for existence of %s for File System %q: %+v", storageID, id.DirectoryName, err) + return fmt.Errorf("checking for existence of %s for File System %q: %+v", storageId, id.FileSystemName, err) } - resp, err := client.GetProperties(ctx, id.AccountName, id.DirectoryName) + resp, err := client.GetProperties(ctx, id.FileSystemName) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[INFO] File System %q does not exist in Storage Account %q - removing from state...", id.DirectoryName, id.AccountName) + if response.WasNotFound(resp.HttpResponse) { + log.Printf("[INFO] File System %q does not exist in Storage Account %q - removing from state...", id.FileSystemName, id.AccountId.AccountName) d.SetId("") return nil } - return fmt.Errorf("retrieving File System %q in Storage Account %q: %+v", id.DirectoryName, id.AccountName, err) + return fmt.Errorf("retrieving %s: %v", id, err) } - d.Set("name", id.DirectoryName) + d.Set("name", id.FileSystemName) - if err := d.Set("properties", resp.Properties); err != nil { - return fmt.Errorf("setting `properties`: %+v", err) + if err = d.Set("properties", resp.Properties); err != nil { + return fmt.Errorf("setting `properties`: %v", err) } var ace []interface{} @@ -351,7 +361,7 @@ func resourceStorageDataLakeGen2FileSystemRead(d *pluginsdk.ResourceData, meta i *storageAccount.AccountProperties.IsHnsEnabled { // The above `getStatus` API request doesn't return the ACLs // Have to make a `getAccessControl` request, but that doesn't return all fields either! 
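// Reading the ACLs therefore takes two data-plane calls in this function: the filesystem
// properties come from FileSystemsClient.GetProperties, while the root ACL needs a separate
// paths GetProperties call with the GetAccessControl action, whose ACL string is then parsed.
// A condensed sketch of that second step, using the 2023-11-03 giovanni types referenced in
// this change (not the exact provider code):
//
//	input := paths.GetPropertiesInput{Action: paths.GetPropertiesActionGetAccessControl}
//	pathResponse, err := pathClient.GetProperties(ctx, id.FileSystemName, "/", input)
//	if err == nil {
//		acl, aclErr := accesscontrol.ParseACL(pathResponse.ACL)
//		// the parsed ACL is then flattened into the `ace` attribute
//	}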
- pathResponse, err := pathClient.GetProperties(ctx, id.AccountName, id.DirectoryName, "/", paths.GetPropertiesActionGetAccessControl) + pathResponse, err := pathClient.GetProperties(ctx, id.FileSystemName, "/", paths.GetPropertiesInput{Action: paths.GetPropertiesActionGetAccessControl}) if err == nil { acl, err := accesscontrol.ParseACL(pathResponse.ACL) if err != nil { @@ -370,19 +380,20 @@ func resourceStorageDataLakeGen2FileSystemRead(d *pluginsdk.ResourceData, meta i } func resourceStorageDataLakeGen2FileSystemDelete(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Storage.FileSystemsClient + storageClient := meta.(*clients.Client).Storage + client := storageClient.FileSystemsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := filesystems.ParseResourceID(d.Id()) + id, err := filesystems.ParseFileSystemID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - resp, err := client.Delete(ctx, id.AccountName, id.DirectoryName) + resp, err := client.Delete(ctx, id.FileSystemName) if err != nil { - if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting File System %q in Storage Account %q: %+v", id.DirectoryName, id.AccountName, err) + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("deleting %s: %v", id, err) } } diff --git a/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go b/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go index 8399d2b0d212e..6ed02552aa163 100644 --- a/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go +++ b/internal/services/storage/storage_data_lake_gen2_filesystem_resource_test.go @@ -6,6 +6,7 @@ package storage_test import ( "context" "fmt" + "github.com/hashicorp/go-azure-helpers/lang/response" "testing" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" @@ -13,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/filesystems" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/filesystems" ) type StorageDataLakeGen2FileSystemResource struct{} @@ -147,27 +148,27 @@ func TestAccStorageDataLakeGen2FileSystem_withSuperUsers(t *testing.T) { } func (r StorageDataLakeGen2FileSystemResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := filesystems.ParseResourceID(state.ID) + id, err := filesystems.ParseFileSystemID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } - resp, err := client.Storage.FileSystemsClient.GetProperties(ctx, id.AccountName, id.DirectoryName) + resp, err := client.Storage.FileSystemsClient.GetProperties(ctx, id.FileSystemName) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } - return nil, fmt.Errorf("retrieving File System %q (Account %q): %+v", id.DirectoryName, id.AccountName, err) + return nil, fmt.Errorf("retrieving File System %q (Account %q): %+v", id.FileSystemName, id.AccountId.AccountName, err) } return utils.Bool(true), nil } func (r StorageDataLakeGen2FileSystemResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, 
error) { - id, err := filesystems.ParseResourceID(state.ID) + id, err := filesystems.ParseFileSystemID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } - if _, err := client.Storage.FileSystemsClient.Delete(ctx, id.AccountName, id.DirectoryName); err != nil { - return nil, fmt.Errorf("deleting File System %q (Account %q): %+v", id.DirectoryName, id.AccountName, err) + if _, err := client.Storage.FileSystemsClient.Delete(ctx, id.FileSystemName); err != nil { + return nil, fmt.Errorf("deleting File System %q (Account %q): %+v", id.FileSystemName, id.AccountId.AccountName, err) } return utils.Bool(true), nil } diff --git a/internal/services/storage/storage_data_lake_gen2_path_resource.go b/internal/services/storage/storage_data_lake_gen2_path_resource.go index 8457260a94c61..ec53442498d5e 100644 --- a/internal/services/storage/storage_data_lake_gen2_path_resource.go +++ b/internal/services/storage/storage_data_lake_gen2_path_resource.go @@ -9,6 +9,7 @@ import ( "log" "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" @@ -17,7 +18,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/paths" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/paths" "github.com/tombuildsstuff/giovanni/storage/accesscontrol" ) @@ -29,27 +31,27 @@ func resourceStorageDataLakeGen2Path() *pluginsdk.Resource { Delete: resourceStorageDataLakeGen2PathDelete, Importer: pluginsdk.ImporterValidatingResourceIdThen(func(id string) error { - _, err := paths.ParseResourceID(id) + _, err := paths.ParsePathID(id, "") // TODO: actual domain suffix needed here! return err }, func(ctx context.Context, d *pluginsdk.ResourceData, meta interface{}) ([]*pluginsdk.ResourceData, error) { storageClients := meta.(*clients.Client).Storage - id, err := paths.ParseResourceID(d.Id()) + id, err := paths.ParsePathID(d.Id(), "") // TODO: actual domain suffix needed here! 
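// The 2023-11-03 parsers take the environment's storage domain suffix as a second argument so
// that account-specific data-plane URIs can be parsed; the TODOs above flag that these
// import-time paths still pass an empty suffix. The runtime CRUD functions updated in this
// change pass the configured value instead, along the lines of:
//
//	id, err := paths.ParsePathID(d.Id(), storageClient.StorageDomainSuffix)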
if err != nil { return []*pluginsdk.ResourceData{d}, fmt.Errorf("parsing ID %q for import of Data Lake Gen2 Path: %v", d.Id(), err) } // we then need to look up the Storage Account ID - account, err := storageClients.FindAccount(ctx, id.AccountName) + account, err := storageClients.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving Account %q for Data Lake Gen2 Path %q in File System %q: %s", id.AccountName, id.Path, id.FileSystemName, err) + return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving Account %q for Data Lake Gen2 Path %q in File System %q: %s", id.AccountId.AccountName, id.Path, id.FileSystemName, err) } if account == nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return []*pluginsdk.ResourceData{d}, fmt.Errorf("Unable to locate Storage Account %q!", id.AccountId.AccountName) } - if _, err = storageClients.FileSystemsClient.GetProperties(ctx, id.AccountName, id.FileSystemName); err != nil { - return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving File System %q for Data Lake Gen 2 Path %q in Account %q: %s", id.FileSystemName, id.Path, id.AccountName, err) + if _, err = storageClients.FileSystemsClient.GetProperties(ctx, id.FileSystemName); err != nil { + return []*pluginsdk.ResourceData{d}, fmt.Errorf("retrieving File System %q for Data Lake Gen 2 Path %q in Account %q: %s", id.FileSystemName, id.Path, id.AccountId.AccountName, err) } d.Set("storage_account_id", account.ID) @@ -142,38 +144,44 @@ func resourceStorageDataLakeGen2Path() *pluginsdk.Resource { } func resourceStorageDataLakeGen2PathCreate(d *pluginsdk.ResourceData, meta interface{}) error { - accountsClient := meta.(*clients.Client).Storage.AccountsClient - client := meta.(*clients.Client).Storage.ADLSGen2PathsClient + storageClient := meta.(*clients.Client).Storage + accountsClient := storageClient.AccountsClient + client := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() - storageID, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) + storageId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { return err } // confirm the storage account exists, otherwise Data Plane API requests will fail - storageAccount, err := accountsClient.GetProperties(ctx, storageID.ResourceGroupName, storageID.StorageAccountName, "") + storageAccount, err := accountsClient.GetProperties(ctx, storageId.ResourceGroupName, storageId.StorageAccountName, "") if err != nil { if utils.ResponseWasNotFound(storageAccount.Response) { - return fmt.Errorf("%s was not found", storageID) + return fmt.Errorf("%s was not found", storageId) } - return fmt.Errorf("checking for existence of %s: %+v", storageID, err) + return fmt.Errorf("checking for existence of %s: %v", storageId, err) } fileSystemName := d.Get("filesystem_name").(string) path := d.Get("path").(string) - id := client.GetResourceID(storageID.StorageAccountName, fileSystemName, path) - resp, err := client.GetProperties(ctx, storageID.StorageAccountName, fileSystemName, path, paths.GetPropertiesActionGetStatus) + accountId, err := accounts.ParseAccountID(d.Get("storage_account_id").(string), storageClient.StorageDomainSuffix) if err != nil { - if !utils.ResponseWasNotFound(resp.Response) { - return fmt.Errorf("checking for existence of existing Path %q in File System %q in %s: %+v", path, fileSystemName, 
storageID, err) + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := paths.NewPathID(*accountId, fileSystemName, path) + resp, err := client.GetProperties(ctx, fileSystemName, path, paths.GetPropertiesInput{Action: paths.GetPropertiesActionGetStatus}) + if err != nil { + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("checking for existence of existing Path %q in File System %q in %s: %v", path, fileSystemName, storageId, err) } } - if !utils.ResponseWasNotFound(resp.Response) { - return tf.ImportAsExistsError("azurerm_storage_data_lake_gen2_path", id) + if !response.WasNotFound(resp.HttpResponse) { + return tf.ImportAsExistsError("azurerm_storage_data_lake_gen2_path", id.ID()) } resourceString := d.Get("resource").(string) @@ -182,12 +190,12 @@ func resourceStorageDataLakeGen2PathCreate(d *pluginsdk.ResourceData, meta inter case "directory": resource = paths.PathResourceDirectory default: - return fmt.Errorf("Unhandled resource type %q", resourceString) + return fmt.Errorf("unhandled resource type %q", resourceString) } aceRaw := d.Get("ace").(*pluginsdk.Set).List() acl, err := ExpandDataLakeGen2AceList(aceRaw) if err != nil { - return fmt.Errorf("parsing ace list: %s", err) + return fmt.Errorf("parsing ace list: %v", err) } var owner *string @@ -201,13 +209,13 @@ func resourceStorageDataLakeGen2PathCreate(d *pluginsdk.ResourceData, meta inter group = &sv } - log.Printf("[INFO] Creating Path %q in File System %q in %s.", path, fileSystemName, storageID) + log.Printf("[INFO] Creating %s...", id) input := paths.CreateInput{ Resource: resource, } - if _, err := client.Create(ctx, storageID.StorageAccountName, fileSystemName, path, input); err != nil { - return fmt.Errorf("creating Path %q in File System %q in %s: %+v", path, fileSystemName, storageID.StorageAccountName, err) + if _, err = client.Create(ctx, fileSystemName, path, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } if acl != nil || owner != nil || group != nil { @@ -221,27 +229,28 @@ func resourceStorageDataLakeGen2PathCreate(d *pluginsdk.ResourceData, meta inter Owner: owner, Group: group, } - if _, err := client.SetAccessControl(ctx, storageID.StorageAccountName, fileSystemName, path, accessControlInput); err != nil { - return fmt.Errorf("setting access control for Path %q in File System %q in %s: %+v", path, fileSystemName, storageID, err) + if _, err = client.SetAccessControl(ctx, fileSystemName, path, accessControlInput); err != nil { + return fmt.Errorf("setting access control for %s: %+v", id, err) } } - d.SetId(id) + d.SetId(id.ID()) return resourceStorageDataLakeGen2PathRead(d, meta) } func resourceStorageDataLakeGen2PathUpdate(d *pluginsdk.ResourceData, meta interface{}) error { - accountsClient := meta.(*clients.Client).Storage.AccountsClient - client := meta.(*clients.Client).Storage.ADLSGen2PathsClient + storageClient := meta.(*clients.Client).Storage + accountsClient := storageClient.AccountsClient + client := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := paths.ParseResourceID(d.Id()) + id, err := paths.ParsePathID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - storageID, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) + storageId, err := commonids.ParseStorageAccountID(d.Get("storage_account_id").(string)) if err != nil { return err } @@ -251,7 +260,7 @@ func resourceStorageDataLakeGen2PathUpdate(d 
*pluginsdk.ResourceData, meta inter aceRaw := d.Get("ace").(*pluginsdk.Set).List() acl, err := ExpandDataLakeGen2AceList(aceRaw) if err != nil { - return fmt.Errorf("parsing ace list: %s", err) + return fmt.Errorf("parsing ace list: %v", err) } var owner *string @@ -266,13 +275,13 @@ func resourceStorageDataLakeGen2PathUpdate(d *pluginsdk.ResourceData, meta inter } // confirm the storage account exists, otherwise Data Plane API requests will fail - storageAccount, err := accountsClient.GetProperties(ctx, storageID.ResourceGroupName, storageID.StorageAccountName, "") + storageAccount, err := accountsClient.GetProperties(ctx, storageId.ResourceGroupName, storageId.StorageAccountName, "") if err != nil { if utils.ResponseWasNotFound(storageAccount.Response) { - return fmt.Errorf("%s was not found", storageID) + return fmt.Errorf("%s was not found", storageId) } - return fmt.Errorf("checking for existence of %s: %+v", storageID, err) + return fmt.Errorf("checking for existence of %s: %v", storageId, err) } if acl != nil || owner != nil || group != nil { @@ -286,8 +295,8 @@ func resourceStorageDataLakeGen2PathUpdate(d *pluginsdk.ResourceData, meta inter Owner: owner, Group: group, } - if _, err := client.SetAccessControl(ctx, id.AccountName, id.FileSystemName, path, accessControlInput); err != nil { - return fmt.Errorf("setting access control for Path %q in File System %q in Storage Account %q: %s", path, id.FileSystemName, id.AccountName, err) + if _, err = client.SetAccessControl(ctx, id.FileSystemName, path, accessControlInput); err != nil { + return fmt.Errorf("setting access control for %s: %s", id, err) } } @@ -295,24 +304,25 @@ func resourceStorageDataLakeGen2PathUpdate(d *pluginsdk.ResourceData, meta inter } func resourceStorageDataLakeGen2PathRead(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Storage.ADLSGen2PathsClient + storageClient := meta.(*clients.Client).Storage + client := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := paths.ParseResourceID(d.Id()) + id, err := paths.ParsePathID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - resp, err := client.GetProperties(ctx, id.AccountName, id.FileSystemName, id.Path, paths.GetPropertiesActionGetStatus) + resp, err := client.GetProperties(ctx, id.FileSystemName, id.Path, paths.GetPropertiesInput{Action: paths.GetPropertiesActionGetStatus}) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[INFO] Path %q does not exist in File System %q in Storage Account %q - removing from state...", id.Path, id.FileSystemName, id.AccountName) + if response.WasNotFound(resp.HttpResponse) { + log.Printf("[INFO] Path %q does not exist in File System %q in Storage Account %q - removing from state...", id.Path, id.FileSystemName, id.AccountId.AccountName) d.SetId("") return nil } - return fmt.Errorf("retrieving Path %q in File System %q in Storage Account %q: %+v", id.Path, id.FileSystemName, id.AccountName, err) + return fmt.Errorf("retrieving %s: %v", id, err) } d.Set("path", id.Path) @@ -322,20 +332,20 @@ func resourceStorageDataLakeGen2PathRead(d *pluginsdk.ResourceData, meta interfa // The above `getStatus` API request doesn't return the ACLs // Have to make a `getAccessControl` request, but that doesn't return all fields either! 
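// The not-found handling below also switches helpers: utils.ResponseWasNotFound operated on an
// autorest.Response, whereas response.WasNotFound (from hashicorp/go-azure-helpers) inspects the
// raw *http.Response exposed as HttpResponse on the new giovanni response types, e.g.:
//
//	if response.WasNotFound(resp.HttpResponse) {
//		d.SetId("")
//		return nil
//	}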
- resp, err = client.GetProperties(ctx, id.AccountName, id.FileSystemName, id.Path, paths.GetPropertiesActionGetAccessControl) + resp, err = client.GetProperties(ctx, id.FileSystemName, id.Path, paths.GetPropertiesInput{Action: paths.GetPropertiesActionGetAccessControl}) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - log.Printf("[INFO] Path %q does not exist in File System %q in Storage Account %q - removing from state...", id.Path, id.FileSystemName, id.AccountName) + if response.WasNotFound(resp.HttpResponse) { + log.Printf("[INFO] Path %q does not exist in File System %q in Storage Account %q - removing from state...", id.Path, id.FileSystemName, id.AccountId.AccountName) d.SetId("") return nil } - return fmt.Errorf("retrieving ACLs for Path %q in File System %q in Storage Account %q: %+v", id.Path, id.FileSystemName, id.AccountName, err) + return fmt.Errorf("retrieving ACLs for %s: %v", id, err) } acl, err := accesscontrol.ParseACL(resp.ACL) if err != nil { - return fmt.Errorf("parsing response ACL %q: %s", resp.ACL, err) + return fmt.Errorf("parsing response ACL %q: %v", resp.ACL, err) } d.Set("ace", FlattenDataLakeGen2AceList(d, acl)) @@ -343,19 +353,20 @@ func resourceStorageDataLakeGen2PathRead(d *pluginsdk.ResourceData, meta interfa } func resourceStorageDataLakeGen2PathDelete(d *pluginsdk.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Storage.ADLSGen2PathsClient + storageClient := meta.(*clients.Client).Storage + client := storageClient.ADLSGen2PathsClient ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := paths.ParseResourceID(d.Id()) + id, err := paths.ParsePathID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - resp, err := client.Delete(ctx, id.AccountName, id.FileSystemName, id.Path) + resp, err := client.Delete(ctx, id.FileSystemName, id.Path) if err != nil { - if !utils.ResponseWasNotFound(resp) { - return fmt.Errorf("deleting Path %q in File System %q in Storage Account %q: %+v", id.Path, id.FileSystemName, id.AccountName, err) + if !response.WasNotFound(resp.HttpResponse) { + return fmt.Errorf("deleting %s: %v", id, err) } } diff --git a/internal/services/storage/storage_data_lake_gen2_path_resource_test.go b/internal/services/storage/storage_data_lake_gen2_path_resource_test.go index b38158910a930..bb352b912530c 100644 --- a/internal/services/storage/storage_data_lake_gen2_path_resource_test.go +++ b/internal/services/storage/storage_data_lake_gen2_path_resource_test.go @@ -6,6 +6,7 @@ package storage_test import ( "context" "fmt" + "github.com/hashicorp/go-azure-helpers/lang/response" "testing" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" @@ -13,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/datalakestore/paths" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/datalakestore/paths" ) type StorageDataLakeGen2PathResource struct{} @@ -131,16 +132,16 @@ func TestAccStorageDataLakeGen2Path_withSuperUsers(t *testing.T) { } func (r StorageDataLakeGen2PathResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := paths.ParseResourceID(state.ID) + id, err := paths.ParsePathID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { 
return nil, err } - resp, err := client.Storage.ADLSGen2PathsClient.GetProperties(ctx, id.AccountName, id.FileSystemName, id.Path, paths.GetPropertiesActionGetStatus) + resp, err := client.Storage.ADLSGen2PathsClient.GetProperties(ctx, id.FileSystemName, id.Path, paths.GetPropertiesInput{Action: paths.GetPropertiesActionGetStatus}) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } - return nil, fmt.Errorf("retrieving Path %q (File System %q / Account %q): %+v", id.Path, id.FileSystemName, id.AccountName, err) + return nil, fmt.Errorf("retrieving Path %q (File System %q / Account %q): %+v", id.Path, id.FileSystemName, id.AccountId.AccountName, err) } return utils.Bool(true), nil } diff --git a/internal/services/storage/storage_queue_resource.go b/internal/services/storage/storage_queue_resource.go index 31b776bd0f1d8..fc0dda42d90da 100644 --- a/internal/services/storage/storage_queue_resource.go +++ b/internal/services/storage/storage_queue_resource.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" @@ -24,8 +25,8 @@ func resourceStorageQueue() *pluginsdk.Resource { Update: resourceStorageQueueUpdate, Delete: resourceStorageQueueDelete, - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.StorageQueueDataPlaneID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := parse.StorageQueueDataPlaneID(id, storageDomainSuffix) return err }), @@ -79,32 +80,33 @@ func resourceStorageQueueCreate(d *pluginsdk.ResourceData, meta interface{}) err account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Queue %q: %s", accountName, queueName, err) + return fmt.Errorf("retrieving Account %q for Queue %q: %v", accountName, queueName, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q", accountName) + return fmt.Errorf("locating Storage Account %q", accountName) } - client, err := storageClient.QueuesClient(ctx, *account) + client, err := storageClient.QueuesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Queues Client: %s", err) + return fmt.Errorf("building Queues Client: %v", err) } - resourceId := parse.NewStorageQueueDataPlaneId(accountName, storageClient.Environment.StorageEndpointSuffix, queueName).ID() + id := parse.NewStorageQueueDataPlaneId(accountName, storageClient.AzureEnvironment.StorageEndpointSuffix, queueName).ID() - exists, err := client.Exists(ctx, account.ResourceGroup, accountName, queueName) + exists, err := client.Exists(ctx, queueName) if err != nil { - return fmt.Errorf("checking for presence of existing Queue %q (Storage Account %q): %s", queueName, accountName, err) + return fmt.Errorf("checking for existing %s: %v", id, err) } if exists != nil && *exists { - return tf.ImportAsExistsError("azurerm_storage_queue", resourceId) + return 
tf.ImportAsExistsError("azurerm_storage_queue", id) } - if err := client.Create(ctx, account.ResourceGroup, accountName, queueName, metaData); err != nil { - return fmt.Errorf("creating Queue %q (Account %q): %+v", queueName, accountName, err) + if err = client.Create(ctx, queueName, metaData); err != nil { + return fmt.Errorf("creating %s: %+v", id, err) } - d.SetId(resourceId) + d.SetId(id) + return resourceStorageQueueRead(d, meta) } @@ -113,7 +115,7 @@ func resourceStorageQueueUpdate(d *pluginsdk.ResourceData, meta interface{}) err ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageQueueDataPlaneID(d.Id()) + id, err := parse.StorageQueueDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } @@ -123,19 +125,19 @@ func resourceStorageQueueUpdate(d *pluginsdk.ResourceData, meta interface{}) err account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Queue %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Queue %q: %v", id.AccountName, id.Name, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } - client, err := storageClient.QueuesClient(ctx, *account) + client, err := storageClient.QueuesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Queues Client: %s", err) + return fmt.Errorf("building Queues Client: %v", err) } - if err := client.UpdateMetaData(ctx, account.ResourceGroup, id.AccountName, id.Name, metaData); err != nil { - return fmt.Errorf("updating MetaData for Queue %q (Storage Account %q): %s", id.Name, id.AccountName, err) + if err = client.UpdateMetaData(ctx, id.Name, metaData); err != nil { + return fmt.Errorf("updating MetaData for %s: %v", id, err) } return resourceStorageQueueRead(d, meta) @@ -147,14 +149,14 @@ func resourceStorageQueueRead(d *pluginsdk.ResourceData, meta interface{}) error ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageQueueDataPlaneID(d.Id()) + id, err := parse.StorageQueueDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Queue %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Queue %q: %v", id.AccountName, id.Name, err) } if account == nil { log.Printf("[WARN] Unable to determine Resource Group for Storage Queue %q (Account %s) - assuming removed & removing from state", id.Name, id.AccountName) @@ -162,14 +164,14 @@ func resourceStorageQueueRead(d *pluginsdk.ResourceData, meta interface{}) error return nil } - client, err := storageClient.QueuesClient(ctx, *account) + client, err := storageClient.QueuesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Queues Client: %s", err) + return fmt.Errorf("building Queues Client: %v", err) } - queue, err := client.Get(ctx, account.ResourceGroup, id.AccountName, id.Name) + queue, err := client.Get(ctx, id.Name) if err != nil { - return fmt.Errorf("retrieving Queue %q (Account %q): %+v", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving %s: %v", id, err) } if queue 
== nil { log.Printf("[INFO] Storage Queue %q no longer exists, removing from state...", id.Name) @@ -195,7 +197,7 @@ func resourceStorageQueueDelete(d *pluginsdk.ResourceData, meta interface{}) err ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageQueueDataPlaneID(d.Id()) + id, err := parse.StorageQueueDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } @@ -210,13 +212,13 @@ func resourceStorageQueueDelete(d *pluginsdk.ResourceData, meta interface{}) err return nil } - client, err := storageClient.QueuesClient(ctx, *account) + client, err := storageClient.QueuesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Queues Client: %s", err) + return fmt.Errorf("building Queues Client: %v", err) } - if err := client.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { - return fmt.Errorf("deleting Storage Queue %q (Account %q): %s", id.Name, id.AccountName, err) + if err = client.Delete(ctx, id.Name); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil diff --git a/internal/services/storage/storage_queue_resource_test.go b/internal/services/storage/storage_queue_resource_test.go index 178637af8368e..7653ec20fe4fc 100644 --- a/internal/services/storage/storage_queue_resource_test.go +++ b/internal/services/storage/storage_queue_resource_test.go @@ -86,7 +86,7 @@ func TestAccStorageQueue_metaData(t *testing.T) { } func (r StorageQueueResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageQueueDataPlaneID(state.ID) + id, err := parse.StorageQueueDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -97,11 +97,11 @@ func (r StorageQueueResource) Exists(ctx context.Context, client *clients.Client if account == nil { return nil, fmt.Errorf("unable to determine Resource Group for Storage Queue %q (Account %q)", id.Name, id.AccountName) } - queuesClient, err := client.Storage.QueuesClient(ctx, *account) + queuesClient, err := client.Storage.QueuesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Queues Client: %+v", err) } - queue, err := queuesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.Name) + queue, err := queuesClient.Get(ctx, id.Name) if err != nil { return nil, fmt.Errorf("retrieving Queue %q (Account %q): %+v", id.Name, id.AccountName, err) } diff --git a/internal/services/storage/storage_share_data_source.go b/internal/services/storage/storage_share_data_source.go index 76f0fc0d026b8..68f537b50cc78 100644 --- a/internal/services/storage/storage_share_data_source.go +++ b/internal/services/storage/storage_share_data_source.go @@ -90,36 +90,38 @@ func dataSourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) err account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %s", accountName, shareName, err) + return fmt.Errorf("retrieving Storage Account %q for Share %q: %s", accountName, shareName, err) } if account == nil { - return fmt.Errorf("unable to locate Account %q for Share %q", accountName, shareName) + return fmt.Errorf("locating Storage Account %q for Share %q", accountName, shareName) } - client, err := storageClient.FileSharesClient(ctx, *account) + // The 
files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) if err != nil { - return fmt.Errorf("building FileShares Client for Storage Account %q (Resource Group %q): %s", accountName, account.ResourceGroup, err) + return fmt.Errorf("building FileShares Client: %v", err) } - id := parse.NewStorageShareDataPlaneId(accountName, storageClient.Environment.StorageEndpointSuffix, shareName).ID() - props, err := client.Get(ctx, account.ResourceGroup, accountName, shareName) + id := parse.NewStorageShareDataPlaneId(accountName, storageClient.AzureEnvironment.StorageEndpointSuffix, shareName).ID() + + props, err := client.Get(ctx, shareName) if err != nil { - return fmt.Errorf("retrieving Share %q (Account %q / Resource Group %q): %s", shareName, accountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } if props == nil { - return fmt.Errorf("share %q was not found in Account %q / Resource Group %q", shareName, accountName, account.ResourceGroup) + return fmt.Errorf("%s was not found", id) } d.SetId(id) d.Set("name", shareName) d.Set("storage_account_name", accountName) d.Set("quota", props.QuotaGB) - if err := d.Set("acl", flattenStorageShareACLs(props.ACLs)); err != nil { - return fmt.Errorf("setting `acl`: %+v", err) + if err = d.Set("acl", flattenStorageShareACLs(props.ACLs)); err != nil { + return fmt.Errorf("setting `acl`: %v", err) } - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %+v", err) + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) } resourceManagerId := parse.NewStorageShareResourceManagerID(storageClient.SubscriptionId, account.ResourceGroup, accountName, "default", shareName) diff --git a/internal/services/storage/storage_share_directory_resource.go b/internal/services/storage/storage_share_directory_resource.go index 67f91fe221340..57060624bc32e 100644 --- a/internal/services/storage/storage_share_directory_resource.go +++ b/internal/services/storage/storage_share_directory_resource.go @@ -10,14 +10,18 @@ import ( "strconv" "time" + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/directories" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/directories" ) func resourceStorageShareDirectory() *pluginsdk.Resource { @@ -27,8 +31,8 @@ func resourceStorageShareDirectory() *pluginsdk.Resource { Update: resourceStorageShareDirectoryUpdate, Delete: resourceStorageShareDirectoryDelete, - Importer: 
pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := directories.ParseResourceID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := directories.ParseDirectoryID(id, storageDomainSuffix) return err }), @@ -46,17 +50,33 @@ func resourceStorageShareDirectory() *pluginsdk.Resource { ForceNew: true, ValidateFunc: validate.StorageShareDirectoryName, }, + + "storage_share_id": { + Type: pluginsdk.TypeString, + Optional: true, // TODO: make required and forcenew in v4.0 + Computed: true, // TODO: remove computed in v4.0 + ConflictsWith: []string{"share_name", "storage_account_name"}, + ValidateFunc: validation.IsURLWithPath, // note: storage domain suffix cannot be determined at validation time, so just make sure it's a well-formed URL + }, + "share_name": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + Deprecated: "the `share_name` and `storage_account_name` properties have been superseded by the `storage_share_id` property and will be removed in version 4.0 of the AzureRM provider", + ConflictsWith: []string{"storage_share_id"}, + RequiredWith: []string{"storage_account_name"}, + ValidateFunc: validation.StringIsNotEmpty, }, + "storage_account_name": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsNotEmpty, + Type: pluginsdk.TypeString, + Optional: true, + Computed: true, + Deprecated: "the `share_name` and `storage_account_name` properties have been superseded by the `storage_share_id` property and will be removed in version 4.0 of the AzureRM provider", + ConflictsWith: []string{"storage_share_id"}, + RequiredWith: []string{"share_name"}, + ValidateFunc: validation.StringIsNotEmpty, }, "metadata": MetaDataSchema(), @@ -69,62 +89,80 @@ func resourceStorageShareDirectoryCreate(d *pluginsdk.ResourceData, meta interfa defer cancel() storageClient := meta.(*clients.Client).Storage - accountName := d.Get("storage_account_name").(string) - shareName := d.Get("share_name").(string) directoryName := d.Get("name").(string) - metaDataRaw := d.Get("metadata").(map[string]interface{}) metaData := ExpandMetaData(metaDataRaw) - account, err := storageClient.FindAccount(ctx, accountName) + var storageShareId *parse.StorageShareDataPlaneId + var err error + if v, ok := d.GetOk("storage_share_id"); ok && v.(string) != "" { + storageShareId, err = parse.StorageShareDataPlaneID(v.(string), storageClient.StorageDomainSuffix) + if err != nil { + return err + } + } else { + // TODO: this is needed until `share_name` / `storage_account_name` are removed in favor of `storage_account_id` + storageShareId = pointer.To(parse.NewStorageShareDataPlaneId(d.Get("storage_account_name").(string), storageClient.StorageDomainSuffix, d.Get("share_name").(string))) + } + + if storageShareId == nil { + return fmt.Errorf("determining storage share ID") + } + + account, err := storageClient.FindAccount(ctx, storageShareId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %s", accountName, directoryName, shareName, err) + return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %v", storageShareId.AccountName, directoryName, storageShareId.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("locating Storage Account %q", 
storageShareId.AccountName) } - client, err := storageClient.FileShareDirectoriesClient(ctx, *account) + accountId, err := accounts.ParseAccountID(storageShareId.ID(), storageClient.StorageDomainSuffix) if err != nil { - return fmt.Errorf("building File Share Directories Client: %s", err) + return fmt.Errorf("parsing Account ID: %v", err) } - existing, err := client.Get(ctx, accountName, shareName, directoryName) + id := directories.NewDirectoryID(*accountId, storageShareId.Name, directoryName) + + client, err := storageClient.FileShareDirectoriesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Directory %q (File Share %q / Storage Account %q / Resource Group %q): %s", directoryName, shareName, accountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Directories Client: %v", err) + } + + existing, err := client.Get(ctx, storageShareId.Name, directoryName) + if err != nil { + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %s: %s", id, err) } } - if !utils.ResponseWasNotFound(existing.Response) { - id := client.GetResourceID(accountName, shareName, directoryName) - return tf.ImportAsExistsError("azurerm_storage_share_directory", id) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_storage_share_directory", id.ID()) } input := directories.CreateDirectoryInput{ MetaData: metaData, } - if _, err := client.Create(ctx, accountName, shareName, directoryName, input); err != nil { - return fmt.Errorf("creating Directory %q (File Share %q / Account %q): %+v", directoryName, shareName, accountName, err) + if _, err = client.Create(ctx, storageShareId.Name, directoryName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } // Storage Share Directories are eventually consistent - log.Printf("[DEBUG] Waiting for Directory %q (File Share %q / Account %q) to become available", directoryName, shareName, accountName) + log.Printf("[DEBUG] Waiting for %s to become available", id) stateConf := &pluginsdk.StateChangeConf{ Pending: []string{"404"}, Target: []string{"200"}, - Refresh: storageShareDirectoryRefreshFunc(ctx, client, accountName, shareName, directoryName), + Refresh: storageShareDirectoryRefreshFunc(ctx, client, id), MinTimeout: 10 * time.Second, ContinuousTargetOccurence: 5, Timeout: d.Timeout(pluginsdk.TimeoutCreate), } - if _, err := stateConf.WaitForStateContext(ctx); err != nil { - return fmt.Errorf("waiting for Directory %q (File Share %q / Account %q) to become available: %s", directoryName, shareName, accountName, err) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return fmt.Errorf("waiting for %s to become available: %v", id, err) } - resourceID := client.GetResourceID(accountName, shareName, directoryName) - d.SetId(resourceID) + d.SetId(id.ID()) return resourceStorageShareDirectoryRead(d, meta) } @@ -134,7 +172,7 @@ func resourceStorageShareDirectoryUpdate(d *pluginsdk.ResourceData, meta interfa defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := directories.ParseResourceID(d.Id()) + id, err := directories.ParseDirectoryID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } @@ -142,21 +180,21 @@ func resourceStorageShareDirectoryUpdate(d *pluginsdk.ResourceData, meta interfa metaDataRaw := d.Get("metadata").(map[string]interface{}) 
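Aside: the pattern running through these storage resources is that data-plane IDs are full URIs whose host embeds the cloud's storage domain suffix, which is why parse.StorageShareDataPlaneID, directories.ParseDirectoryID and the other parsers now take storageClient.StorageDomainSuffix as an argument. A minimal, self-contained sketch of the idea follows; the helper names are hypothetical and are not the provider's actual parse package.

// Sketch only: illustrates why the data-plane ID parsers above are passed the
// environment's storage domain suffix. Hypothetical helpers, not provider code.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildShareDataPlaneID composes an ID such as
// https://myaccount.file.core.windows.net/myshare (public cloud) or
// https://myaccount.file.core.chinacloudapi.cn/myshare (China cloud).
func buildShareDataPlaneID(accountName, domainSuffix, shareName string) string {
	return fmt.Sprintf("https://%s.file.%s/%s", accountName, domainSuffix, shareName)
}

// parseShareDataPlaneID recovers the account and share names. Without knowing
// the suffix for the configured cloud, the account name cannot be split out of
// the host reliably, which is why the suffix is threaded through the parsers.
func parseShareDataPlaneID(id, domainSuffix string) (accountName, shareName string, err error) {
	u, err := url.Parse(id)
	if err != nil {
		return "", "", fmt.Errorf("parsing %q: %v", id, err)
	}
	hostSuffix := ".file." + domainSuffix
	if !strings.HasSuffix(u.Host, hostSuffix) {
		return "", "", fmt.Errorf("expected host %q to end with %q", u.Host, hostSuffix)
	}
	accountName = strings.TrimSuffix(u.Host, hostSuffix)
	shareName = strings.Trim(u.Path, "/")
	if accountName == "" || shareName == "" {
		return "", "", fmt.Errorf("could not determine account or share name from %q", id)
	}
	return accountName, shareName, nil
}

func main() {
	id := buildShareDataPlaneID("myaccount", "core.windows.net", "myshare")
	account, share, err := parseShareDataPlaneID(id, "core.windows.net")
	fmt.Println(id, account, share, err)
}

The same reasoning applies to the typed giovanni IDs used above (directories.NewDirectoryID, files.NewFileID), which additionally carry the share and path segments.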
metaData := ExpandMetaData(metaDataRaw) - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %s", id.AccountName, id.DirectoryName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %v", id.AccountId.AccountName, id.DirectoryPath, id.ShareName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account: %q", id.AccountId.AccountName) } - client, err := storageClient.FileShareDirectoriesClient(ctx, *account) + client, err := storageClient.FileShareDirectoriesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share Client: %s", err) + return fmt.Errorf("building File Share Client: %v", err) } - if _, err := client.SetMetaData(ctx, id.AccountName, id.ShareName, id.DirectoryName, metaData); err != nil { - return fmt.Errorf("updating MetaData for Directory %q (File Share %q / Account %q): %+v", id.DirectoryName, id.ShareName, id.AccountName, err) + if _, err = client.SetMetaData(ctx, id.ShareName, id.DirectoryPath, directories.SetMetaDataInput{MetaData: metaData}); err != nil { + return fmt.Errorf("updating Metadata for %s: %v", id, err) } return resourceStorageShareDirectoryRead(d, meta) @@ -167,37 +205,40 @@ func resourceStorageShareDirectoryRead(d *pluginsdk.ResourceData, meta interface defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := directories.ParseResourceID(d.Id()) + id, err := directories.ParseDirectoryID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %s", id.AccountName, id.DirectoryName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %v", id.AccountId.AccountName, id.DirectoryPath, id.ShareName, err) } if account == nil { - log.Printf("[WARN] Unable to determine Resource Group for Storage Share Directory %q (Share %s, Account %s) - assuming removed & removing from state", id.DirectoryName, id.ShareName, id.AccountName) + log.Printf("[WARN] Unable to determine Resource Group for Storage Share Directory %q (Share %s, Account %s) - assuming removed & removing from state", id.DirectoryPath, id.ShareName, id.AccountId.AccountName) d.SetId("") return nil } - client, err := storageClient.FileShareDirectoriesClient(ctx, *account) + storageShareId := parse.NewStorageShareDataPlaneId(id.AccountId.AccountName, storageClient.StorageDomainSuffix, id.ShareName) + + client, err := storageClient.FileShareDirectoriesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client: %v", err) } - props, err := client.Get(ctx, id.AccountName, id.ShareName, id.DirectoryName) + props, err := client.Get(ctx, id.ShareName, id.DirectoryPath) if err != nil { - return fmt.Errorf("retrieving Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryName, 
id.ShareName, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } - d.Set("name", id.DirectoryName) + d.Set("name", id.DirectoryPath) + d.Set("storage_share_id", storageShareId.ID()) d.Set("share_name", id.ShareName) - d.Set("storage_account_name", id.AccountName) + d.Set("storage_account_name", id.AccountId.AccountName) - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { - return fmt.Errorf("setting `metadata`: %s", err) + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + return fmt.Errorf("setting `metadata`: %v", err) } return nil @@ -208,38 +249,38 @@ func resourceStorageShareDirectoryDelete(d *pluginsdk.ResourceData, meta interfa defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := directories.ParseResourceID(d.Id()) + id, err := directories.ParseDirectoryID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %s", id.AccountName, id.DirectoryName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %v", id.AccountId.AccountName, id.DirectoryPath, id.ShareName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) } - client, err := storageClient.FileShareDirectoriesClient(ctx, *account) + client, err := storageClient.FileShareDirectoriesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client: %v", err) } - if _, err := client.Delete(ctx, id.AccountName, id.ShareName, id.DirectoryName); err != nil { - return fmt.Errorf("deleting Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryName, id.ShareName, id.AccountName, account.ResourceGroup, err) + if _, err = client.Delete(ctx, id.ShareName, id.DirectoryPath); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil } -func storageShareDirectoryRefreshFunc(ctx context.Context, client *directories.Client, accountName, shareName, directoryName string) pluginsdk.StateRefreshFunc { +func storageShareDirectoryRefreshFunc(ctx context.Context, client *directories.Client, id directories.DirectoryId) pluginsdk.StateRefreshFunc { return func() (interface{}, string, error) { - res, err := client.Get(ctx, accountName, shareName, directoryName) + res, err := client.Get(ctx, id.ShareName, id.DirectoryPath) if err != nil { - return nil, strconv.Itoa(res.StatusCode), fmt.Errorf("retrieving Directory %q (File Share %q / Account %q): %s", directoryName, shareName, accountName, err) + return nil, strconv.Itoa(res.HttpResponse.StatusCode), fmt.Errorf("retrieving %s: %v", id, err) } - return res, strconv.Itoa(res.StatusCode), nil + return res, strconv.Itoa(res.HttpResponse.StatusCode), nil } } diff --git a/internal/services/storage/storage_share_directory_resource_test.go b/internal/services/storage/storage_share_directory_resource_test.go index f68cfde672fce..d57862d10920d 100644 --- a/internal/services/storage/storage_share_directory_resource_test.go +++ 
b/internal/services/storage/storage_share_directory_resource_test.go @@ -8,12 +8,13 @@ import ( "fmt" "testing" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/directories" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/directories" ) type StorageShareDirectoryResource struct{} @@ -33,6 +34,52 @@ func TestAccStorageShareDirectory_basic(t *testing.T) { }) } +func TestAccStorageShareDirectory_migrateStorageShareId(t *testing.T) { + // TODO: remove test in v4.0 + data := acceptance.BuildTestData(t, "azurerm_storage_share_directory", "test") + r := StorageShareDirectoryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccStorageShareDirectory_basicDeprecated(t *testing.T) { + // TODO: remove test in v4.0 + data := acceptance.BuildTestData(t, "azurerm_storage_share_directory", "test") + r := StorageShareDirectoryResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basicDeprecated(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func TestAccStorageShareDirectory_uppercase(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_storage_share_directory", "test") r := StorageShareDirectoryResource{} @@ -119,27 +166,27 @@ func TestAccStorageShareDirectory_nested(t *testing.T) { } func (r StorageShareDirectoryResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := directories.ParseResourceID(state.ID) + id, err := directories.ParseDirectoryID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } - account, err := client.Storage.FindAccount(ctx, id.AccountName) + account, err := client.Storage.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return nil, fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %s", id.AccountName, id.DirectoryName, id.ShareName, err) + return nil, fmt.Errorf("retrieving Account %q for Directory %q (Share %q): %s", id.AccountId.AccountName, id.DirectoryPath, id.ShareName, err) } if account == nil { - return nil, fmt.Errorf("unable to determine Resource Group for Storage Share Directory %q (Share %q / Account %q)", id.DirectoryName, id.ShareName, id.AccountName) + return nil, fmt.Errorf("unable to determine Resource Group for Storage Share Directory %q (Share %q / Account %q)", id.DirectoryPath, id.ShareName, id.AccountId.AccountName) } - dirClient, err := client.Storage.FileShareDirectoriesClient(ctx, *account) + dirClient, err := client.Storage.FileShareDirectoriesDataPlaneClient(ctx, *account, 
client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return nil, fmt.Errorf("building File Share client for Storage Account %q (Resource Group %q): %+v", id.AccountName, account.ResourceGroup, err) + return nil, fmt.Errorf("building File Share client for Storage Account %q (Resource Group %q): %+v", id.AccountId.AccountName, account.ResourceGroup, err) } - resp, err := dirClient.Get(ctx, id.AccountName, id.ShareName, id.DirectoryName) + resp, err := dirClient.Get(ctx, id.ShareName, id.DirectoryPath) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } - return nil, fmt.Errorf("retrieving Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryName, id.ShareName, id.AccountName, account.ResourceGroup, err) + return nil, fmt.Errorf("retrieving Storage Share %q (File Share %q / Account %q / Resource Group %q): %s", id.DirectoryPath, id.ShareName, id.AccountId.AccountName, account.ResourceGroup, err) } return utils.Bool(true), nil } @@ -149,6 +196,18 @@ func (r StorageShareDirectoryResource) basic(data acceptance.TestData) string { return fmt.Sprintf(` %s +resource "azurerm_storage_share_directory" "test" { + name = "dir" + storage_share_id = azurerm_storage_share.test.id +} +`, template) +} + +func (r StorageShareDirectoryResource) basicDeprecated(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + resource "azurerm_storage_share_directory" "test" { name = "dir" share_name = azurerm_storage_share.test.name @@ -163,9 +222,8 @@ func (r StorageShareDirectoryResource) uppercase(data acceptance.TestData) strin %s resource "azurerm_storage_share_directory" "test" { - name = "UpperCaseCharacterS" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "UpperCaseCharacterS" + storage_share_id = azurerm_storage_share.test.id } `, template) } @@ -176,9 +234,8 @@ func (r StorageShareDirectoryResource) requiresImport(data acceptance.TestData) %s resource "azurerm_storage_share_directory" "import" { - name = azurerm_storage_share_directory.test.name - share_name = azurerm_storage_share_directory.test.share_name - storage_account_name = azurerm_storage_share_directory.test.storage_account_name + name = azurerm_storage_share_directory.test.name + storage_share_id = azurerm_storage_share.test.id } `, template) } @@ -189,9 +246,8 @@ func (r StorageShareDirectoryResource) complete(data acceptance.TestData) string %s resource "azurerm_storage_share_directory" "test" { - name = "dir" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "dir" + storage_share_id = azurerm_storage_share.test.id metadata = { hello = "world" @@ -206,9 +262,8 @@ func (r StorageShareDirectoryResource) updated(data acceptance.TestData) string %s resource "azurerm_storage_share_directory" "test" { - name = "dir" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "dir" + storage_share_id = azurerm_storage_share.test.id metadata = { hello = "world" @@ -224,27 +279,23 @@ func (r StorageShareDirectoryResource) nested(data acceptance.TestData) string { %s resource "azurerm_storage_share_directory" "parent" { - name = "123--parent-dir" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "123--parent-dir" + 
storage_share_id = azurerm_storage_share.test.id } resource "azurerm_storage_share_directory" "child_one" { - name = "${azurerm_storage_share_directory.parent.name}/child1" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "${azurerm_storage_share_directory.parent.name}/child1" + storage_share_id = azurerm_storage_share.test.id } resource "azurerm_storage_share_directory" "child_two" { - name = "${azurerm_storage_share_directory.child_one.name}/childtwo--123" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "${azurerm_storage_share_directory.child_one.name}/childtwo--123" + storage_share_id = azurerm_storage_share.test.id } resource "azurerm_storage_share_directory" "multiple_child_one" { - name = "${azurerm_storage_share_directory.parent.name}/c" - share_name = azurerm_storage_share.test.name - storage_account_name = azurerm_storage_account.test.name + name = "${azurerm_storage_share_directory.parent.name}/c" + storage_share_id = azurerm_storage_share.test.id } `, template) } diff --git a/internal/services/storage/storage_share_file_resource.go b/internal/services/storage/storage_share_file_resource.go index 48b4cf882332d..a0d766f8df86d 100644 --- a/internal/services/storage/storage_share_file_resource.go +++ b/internal/services/storage/storage_share_file_resource.go @@ -9,15 +9,18 @@ import ( "os" "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" storageValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/files" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/files" ) func resourceStorageShareFile() *pluginsdk.Resource { @@ -27,8 +30,8 @@ func resourceStorageShareFile() *pluginsdk.Resource { Update: resourceStorageShareFileUpdate, Delete: resourceStorageShareFileDelete, - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := files.ParseResourceID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := files.ParseFileID(id, storageDomainSuffix) return err }), @@ -45,12 +48,14 @@ func resourceStorageShareFile() *pluginsdk.Resource { Required: true, ForceNew: true, }, + "storage_share_id": { Type: pluginsdk.TypeString, Required: true, ForceNew: true, - ValidateFunc: storageValidate.StorageShareID, + ValidateFunc: validation.IsURLWithPath, // note: storage domain suffix cannot be determined at validation time, so just make sure it's a well-formed URL }, + "path": { Type: pluginsdk.TypeString, ForceNew: true, @@ -106,7 +111,7 @@ func resourceStorageShareFileCreate(d *pluginsdk.ResourceData, meta interface{}) defer cancel() storageClient := meta.(*clients.Client).Storage - storageShareID, err := 
parse.StorageShareDataPlaneID(d.Get("storage_share_id").(string)) + storageShareId, err := parse.StorageShareDataPlaneID(d.Get("storage_share_id").(string), storageClient.StorageDomainSuffix) if err != nil { return err } @@ -114,42 +119,48 @@ func resourceStorageShareFileCreate(d *pluginsdk.ResourceData, meta interface{}) fileName := d.Get("name").(string) path := d.Get("path").(string) - account, err := storageClient.FindAccount(ctx, storageShareID.AccountName) + account, err := storageClient.FindAccount(ctx, storageShareId.AccountName) if err != nil { - return fmt.Errorf("eretrieving Account %q for File %q (Share %q): %s", storageShareID.AccountName, fileName, storageShareID.Name, err) + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %v", storageShareId.AccountName, fileName, storageShareId.Name, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q!", storageShareID.AccountName) + return fmt.Errorf("locating Storage Account %q", storageShareId.AccountName) } - fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) + accountId, err := accounts.ParseAccountID(storageShareId.ID(), storageClient.StorageDomainSuffix) if err != nil { - return fmt.Errorf("building File Share Directories Client: %s", err) + return fmt.Errorf("parsing Account ID: %v", err) } - share, err := fileSharesClient.Get(ctx, account.ResourceGroup, storageShareID.AccountName, storageShareID.Name) + id := files.NewFileID(*accountId, storageShareId.Name, path, fileName) + + fileSharesClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("retrieving Share %q for File %q: %s", storageShareID.Name, fileName, err) + return fmt.Errorf("building File Share Directories Client: %v", err) + } + + share, err := fileSharesClient.Get(ctx, storageShareId.Name) + if err != nil { + return fmt.Errorf("retrieving Share %q for File %q: %v", storageShareId.Name, fileName, err) } if share == nil { - return fmt.Errorf("unable to locate Storage Share %q", storageShareID.Name) + return fmt.Errorf("unable to locate Storage Share %q", storageShareId.Name) } - client, err := storageClient.FileShareFilesClient(ctx, *account) + client, err := storageClient.FileShareFilesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building File Share Directories Client: %s", err) } - existing, err := client.GetProperties(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName) + existing, err := client.GetProperties(ctx, storageShareId.Name, path, fileName) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", fileName, storageShareID.Name, storageShareID.AccountName, account.ResourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %s: %v", id, err) } } - if !utils.ResponseWasNotFound(existing.Response) { - id := client.GetResourceID(storageShareID.AccountName, storageShareID.Name, path, fileName) - return tf.ImportAsExistsError("azurerm_storage_share_file", id) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_storage_share_file", id.ID()) } input := files.CreateInput{ @@ -167,33 +178,32 @@ func resourceStorageShareFileCreate(d *pluginsdk.ResourceData, meta 
interface{}) if v, ok := d.GetOk("source"); ok { file, err = os.Open(v.(string)) if err != nil { - return fmt.Errorf("opening file : %s", err) + return fmt.Errorf("opening file: %s", err) } info, err := file.Stat() if err != nil { - return fmt.Errorf("'stat'-ing File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err) + return fmt.Errorf("'stat'-ing File %q (File Share %q / Account %q): %v", fileName, storageShareId.Name, storageShareId.AccountName, err) } if info.Size() == 0 { - return fmt.Errorf("file %q (File Share %q / Account %q) is empty", fileName, storageShareID.Name, storageShareID.AccountName) + return fmt.Errorf("file %q (File Share %q / Account %q) is empty", fileName, storageShareId.Name, storageShareId.AccountName) } input.ContentLength = info.Size() } - if _, err := client.Create(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName, input); err != nil { - return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err) + if _, err = client.Create(ctx, storageShareId.Name, path, fileName, input); err != nil { + return fmt.Errorf("creating File %q (File Share %q / Account %q): %v", fileName, storageShareId.Name, storageShareId.AccountName, err) } if file != nil { - if err := client.PutFile(ctx, storageShareID.AccountName, storageShareID.Name, path, fileName, file, 4); err != nil { - return fmt.Errorf("uploading File: %q (File Share %q / Account %q): %+v", fileName, storageShareID.Name, storageShareID.AccountName, err) + if err = client.PutFile(ctx, storageShareId.Name, path, fileName, file, 4); err != nil { + return fmt.Errorf("uploading File: %q (File Share %q / Account %q): %v", fileName, storageShareId.Name, storageShareId.AccountName, err) } } - resourceID := client.GetResourceID(storageShareID.AccountName, storageShareID.Name, path, fileName) - d.SetId(resourceID) + d.SetId(id.ID()) return resourceStorageShareFileRead(d, meta) } @@ -203,41 +213,41 @@ func resourceStorageShareFileUpdate(d *pluginsdk.ResourceData, meta interface{}) defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := files.ParseResourceID(d.Id()) + id, err := files.ParseFileID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for %s: %v", id.AccountId.AccountName, id, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) } - fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) + fileSharesClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share Directories Client: %s", err) + return fmt.Errorf("building File Share Directories Client: %v", err) } - share, err := fileSharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.ShareName) + share, err := fileSharesClient.Get(ctx, id.ShareName) if err != nil { - return fmt.Errorf("retrieving Share %q for File %q: %s", id.ShareName, id.FileName, err) + return fmt.Errorf("retrieving %s: %v", id, err) 
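Aside: with the move to giovanni 2023-11-03, not-found detection switches from utils.ResponseWasNotFound(resp.Response) to response.WasNotFound(resp.HttpResponse), and the create paths above rely on it to tell "missing, safe to create" apart from a genuine failure before returning tf.ImportAsExistsError. Below is a simplified, self-contained stand-in for that gate, under the assumption that only an explicit 404 counts as not found; it is not the go-azure-helpers implementation.

// Sketch only: a stand-in for the not-found check used in the create paths above.
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// wasNotFound mirrors the semantics relied on above: a nil response is not
// treated as "not found"; only an explicit 404 is.
func wasNotFound(resp *http.Response) bool {
	return resp != nil && resp.StatusCode == http.StatusNotFound
}

// errImportAsExists stands in for tf.ImportAsExistsError.
var errImportAsExists = errors.New("resource already exists - to manage it, import it into state")

// ensureAbsent is the create-time gate: GetProperties-style lookups return an
// error for 404s, so the 404 case must be filtered out before failing.
func ensureAbsent(resp *http.Response, lookupErr error) error {
	if lookupErr != nil {
		if !wasNotFound(resp) {
			return fmt.Errorf("checking for an existing resource: %v", lookupErr)
		}
		return nil // 404: nothing exists yet, creation can proceed
	}
	return errImportAsExists // lookup succeeded, so the resource already exists
}

func main() {
	notFound := &http.Response{StatusCode: http.StatusNotFound}
	fmt.Println(ensureAbsent(notFound, errors.New("404 not found")))  // <nil>: safe to create
	fmt.Println(ensureAbsent(&http.Response{StatusCode: 200}, nil))   // already exists
}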
} if share == nil { return fmt.Errorf("unable to locate Storage Share %q", id.ShareName) } - client, err := storageClient.FileShareFilesClient(ctx, *account) + client, err := storageClient.FileShareFilesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share Files Client: %s", err) + return fmt.Errorf("building File Share Files Client: %v", err) } - existing, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + existing, err := client.GetProperties(ctx, id.ShareName, id.DirectoryPath, id.FileName) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for presence of existing %s: %v", id, err) } } @@ -254,8 +264,8 @@ func resourceStorageShareFileUpdate(d *pluginsdk.ResourceData, meta interface{}) input.ContentMD5 = utils.String(v.(string)) } - if _, err := client.SetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName, input); err != nil { - return fmt.Errorf("creating File %q (File Share %q / Account %q): %+v", id.FileName, id.ShareName, id.AccountName, err) + if _, err = client.SetProperties(ctx, id.ShareName, id.DirectoryPath, id.FileName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } } @@ -267,53 +277,53 @@ func resourceStorageShareFileRead(d *pluginsdk.ResourceData, meta interface{}) e defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := files.ParseResourceID(d.Id()) + id, err := files.ParseFileID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountId.AccountName, id.FileName, id.ShareName, err) } if account == nil { - log.Printf("[WARN] Unable to determine Storage Account for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) + log.Printf("[WARN] Unable to determine Storage Account for %s - assuming removed & removing from state", id) d.SetId("") return nil } - fileSharesClient, err := storageClient.FileSharesClient(ctx, *account) + fileSharesClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building File Share Directories Client: %s", err) } - share, err := fileSharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.ShareName) + share, err := fileSharesClient.Get(ctx, id.ShareName) if err != nil { return fmt.Errorf("retrieving Share %q for File %q: %s", id.ShareName, id.FileName, err) } if share == nil { - log.Printf("[WARN] Unable to determine Storage Share for Storage Share File %q (Share %s, Account %s) - assuming removed & removing from state", id.FileName, id.ShareName, id.AccountName) + log.Printf("[WARN] Unable to determine Storage Share for %s - assuming removed & removing from state", id) d.SetId("") return nil } - 
client, err := storageClient.FileShareFilesClient(ctx, *account) + client, err := storageClient.FileShareFilesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client for Storage Account %s: %s", id.AccountId, err) } - props, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + props, err := client.GetProperties(ctx, id.ShareName, id.DirectoryPath, id.FileName) if err != nil { - log.Printf("Retrieving Storage File Share file %q (Directory %q / File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.DirectoryName, id.ShareName, id.AccountName, account.ResourceGroup, err) + log.Printf("retrieving %s: %s", id, err) d.SetId("") return nil } d.Set("name", id.FileName) - d.Set("path", id.DirectoryName) - d.Set("storage_share_id", parse.NewStorageShareDataPlaneId(id.AccountName, storageClient.Environment.StorageEndpointSuffix, id.ShareName).ID()) + d.Set("path", id.DirectoryPath) + d.Set("storage_share_id", parse.NewStorageShareDataPlaneId(id.AccountId.AccountName, storageClient.AzureEnvironment.StorageEndpointSuffix, id.ShareName).ID()) - if err := d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { + if err = d.Set("metadata", FlattenMetaData(props.MetaData)); err != nil { return fmt.Errorf("setting `metadata`: %s", err) } d.Set("content_type", props.ContentType) @@ -335,26 +345,26 @@ func resourceStorageShareFileDelete(d *pluginsdk.ResourceData, meta interface{}) defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := files.ParseResourceID(d.Id()) + id, err := files.ParseFileID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return fmt.Errorf("retrieving Account %q for File %q (Share %q): %v", id.AccountId.AccountName, id.FileName, id.ShareName, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) } - client, err := storageClient.FileShareFilesClient(ctx, *account) + client, err := storageClient.FileShareFilesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building File Share File Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share File Client for Storage Account %q (Resource Group %q): %v", id.AccountId.AccountName, account.ResourceGroup, err) } - if _, err := client.Delete(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName); err != nil { - return fmt.Errorf("deleting Storage Share File %q (File Share %q / Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + if _, err = client.Delete(ctx, id.ShareName, id.DirectoryPath, id.FileName); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil diff --git a/internal/services/storage/storage_share_file_resource_test.go 
b/internal/services/storage/storage_share_file_resource_test.go index 2a2b5b0b3d643..e488f909e9be8 100644 --- a/internal/services/storage/storage_share_file_resource_test.go +++ b/internal/services/storage/storage_share_file_resource_test.go @@ -10,12 +10,13 @@ import ( "regexp" "testing" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/files" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/files" ) type StorageShareFileResource struct{} @@ -137,28 +138,28 @@ func TestAccAzureRMStorageShareFile_withEmptyFile(t *testing.T) { } func (StorageShareFileResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := files.ParseResourceID(state.ID) + id, err := files.ParseFileID(state.ID, clients.Storage.StorageDomainSuffix) if err != nil { return nil, err } - account, err := clients.Storage.FindAccount(ctx, id.AccountName) + account, err := clients.Storage.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return nil, fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountName, id.FileName, id.ShareName, err) + return nil, fmt.Errorf("retrieving Account %q for File %q (Share %q): %s", id.AccountId.AccountName, id.FileName, id.ShareName, err) } if account == nil { return utils.Bool(false), nil } - client, err := clients.Storage.FileShareFilesClient(ctx, *account) + client, err := clients.Storage.FileShareFilesDataPlaneClient(ctx, *account, clients.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building File Share Files Client: %s", err) } - resp, err := client.GetProperties(ctx, id.AccountName, id.ShareName, id.DirectoryName, id.FileName) + resp, err := client.GetProperties(ctx, id.ShareName, id.DirectoryPath, id.FileName) if err != nil { - if !utils.ResponseWasNotFound(resp.Response) { - return nil, fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountName, account.ResourceGroup, err) + if !response.WasNotFound(resp.HttpResponse) { + return nil, fmt.Errorf("checking for presence of existing File %q (File Share %q / Storage Account %q / Resource Group %q): %s", id.FileName, id.ShareName, id.AccountId.AccountName, account.ResourceGroup, err) } } diff --git a/internal/services/storage/storage_share_resource.go b/internal/services/storage/storage_share_resource.go index 42c4f458f3419..86b934d172df9 100644 --- a/internal/services/storage/storage_share_resource.go +++ b/internal/services/storage/storage_share_resource.go @@ -11,13 +11,14 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage" // nolint: staticcheck "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" 
"github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/file/shares" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/file/shares" ) func resourceStorageShare() *pluginsdk.Resource { @@ -27,8 +28,8 @@ func resourceStorageShare() *pluginsdk.Resource { Update: resourceStorageShareUpdate, Delete: resourceStorageShareDelete, - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.StorageShareDataPlaneID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := parse.StorageShareDataPlaneID(id, storageDomainSuffix) return err }), @@ -158,12 +159,14 @@ func resourceStorageShareCreate(d *pluginsdk.ResourceData, meta interface{}) err account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %s", accountName, shareName, err) + return fmt.Errorf("retrieving Account %q for Share %q: %v", accountName, shareName, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", accountName) + return fmt.Errorf("locating Storage Account %q", accountName) } + id := parse.NewStorageShareDataPlaneId(accountName, storageClient.StorageDomainSuffix, shareName) + protocol := shares.ShareProtocol(d.Get("enabled_protocol").(string)) if protocol == shares.NFS { // Only FileStorage (whose sku tier is Premium only) storage account is able to have NFS file shares. @@ -173,19 +176,18 @@ func resourceStorageShareCreate(d *pluginsdk.ResourceData, meta interface{}) err } } - client, err := storageClient.FileSharesClient(ctx, *account) + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) if err != nil { - return fmt.Errorf("building File Share Client: %s", err) + return fmt.Errorf("building File Share Client: %v", err) } - id := parse.NewStorageShareDataPlaneId(accountName, storageClient.Environment.StorageEndpointSuffix, shareName).ID() - - exists, err := client.Exists(ctx, account.ResourceGroup, accountName, shareName) + exists, err := client.Exists(ctx, shareName) if err != nil { - return fmt.Errorf("checking for existence of existing Storage Share %q (Account %q / Resource Group %q): %+v", shareName, accountName, account.ResourceGroup, err) + return fmt.Errorf("checking for existing %s: %v", id, err) } if exists != nil && *exists { - return tf.ImportAsExistsError("azurerm_storage_share", id) + return tf.ImportAsExistsError("azurerm_storage_share", id.ID()) } log.Printf("[INFO] Creating Share %q in Storage Account %q", shareName, accountName) @@ -200,13 +202,14 @@ func resourceStorageShareCreate(d *pluginsdk.ResourceData, meta interface{}) err input.AccessTier = &tier } - if err := client.Create(ctx, account.ResourceGroup, accountName, shareName, input); err != nil { - return fmt.Errorf("creating Share %q (Account %q / Resource Group %q): %+v", shareName, accountName, account.ResourceGroup, err) + if err = client.Create(ctx, shareName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } - d.SetId(id) - if err := client.UpdateACLs(ctx, 
account.ResourceGroup, accountName, shareName, acls); err != nil { - return fmt.Errorf("setting ACL's for Share %q (Account %q / Resource Group %q): %+v", shareName, accountName, account.ResourceGroup, err) + d.SetId(id.ID()) + + if err = client.UpdateACLs(ctx, shareName, shares.SetAclInput{SignedIdentifiers: acls}); err != nil { + return fmt.Errorf("setting ACLs for %s: %v", id, err) } return resourceStorageShareRead(d, meta) @@ -217,14 +220,14 @@ func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := parse.StorageShareDataPlaneID(d.Id()) + id, err := parse.StorageShareDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountName, id.Name, err) } if account == nil { log.Printf("[WARN] Unable to determine Account %q for Storage Share %q - assuming removed & removing from state", id.AccountName, id.Name) @@ -232,12 +235,13 @@ func resourceStorageShareRead(d *pluginsdk.ResourceData, meta interface{}) error return nil } - client, err := storageClient.FileSharesClient(ctx, *account) + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) if err != nil { - return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %v", id.AccountName, account.ResourceGroup, err) } - props, err := client.Get(ctx, account.ResourceGroup, id.AccountName, id.Name) + props, err := client.Get(ctx, id.Name) if err != nil { return err } @@ -278,70 +282,71 @@ func resourceStorageShareUpdate(d *pluginsdk.ResourceData, meta interface{}) err defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := parse.StorageShareDataPlaneID(d.Id()) + id, err := parse.StorageShareDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountName, id.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } - client, err := storageClient.FileSharesClient(ctx, *account) + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) if err != nil { - return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %v", id.AccountName, account.ResourceGroup, err) } if d.HasChange("quota") { - log.Printf("[DEBUG] Updating the Quota for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updating the Quota for %s", id) 
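Aside: the share resources above are built with storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth() because, per the in-code comment, the Files API does not accept bearer tokens, while queues, tables and share directories use DataPlaneOperationSupportingAnyAuthMethod(). A rough sketch of how such a selection could work follows; every name in it is hypothetical, and it only assumes that Azure AD is preferred when both the provider configuration and the target API allow it.

// Sketch only: choosing an authorizer from what a data-plane API supports,
// mirroring the intent of the two DataPlaneOperationSupporting* options above.
package main

import (
	"errors"
	"fmt"
)

type authMethod string

const (
	authSharedKey authMethod = "SharedKey"
	authAzureAD   authMethod = "AzureAD"
)

// operationSupport describes which auth methods a given data-plane API accepts.
type operationSupport struct {
	supportsAzureAD bool
}

func supportingAnyAuthMethod() operationSupport     { return operationSupport{supportsAzureAD: true} }
func supportingOnlySharedKeyAuth() operationSupport { return operationSupport{supportsAzureAD: false} }

// chooseAuth prefers Azure AD when both the configuration and the API allow it
// (e.g. storage_use_azuread = true), otherwise falls back to the account's
// shared key, and errors if neither is usable.
func chooseAuth(preferAzureAD, haveSharedKey bool, op operationSupport) (authMethod, error) {
	if preferAzureAD && op.supportsAzureAD {
		return authAzureAD, nil
	}
	if haveSharedKey {
		return authSharedKey, nil
	}
	return "", errors.New("no supported auth method available for this operation")
}

func main() {
	// File shares: bearer tokens are not accepted, so shared key is chosen even
	// when Azure AD auth is preferred.
	fmt.Println(chooseAuth(true, true, supportingOnlySharedKeyAuth()))
	// Queues/tables/directories: Azure AD can be used when preferred.
	fmt.Println(chooseAuth(true, true, supportingAnyAuthMethod()))
}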
quota := d.Get("quota").(int) - if err := client.UpdateQuota(ctx, account.ResourceGroup, id.AccountName, id.Name, quota); err != nil { - return fmt.Errorf("updating Quota for File Share %q (Storage Account %q): %s", id.Name, id.AccountName, err) + if err = client.UpdateQuota(ctx, id.Name, quota); err != nil { + return fmt.Errorf("updating Quota for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the Quota for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updated the Quota for %s", id) } if d.HasChange("metadata") { - log.Printf("[DEBUG] Updating the MetaData for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updating the MetaData for %s", id) metaDataRaw := d.Get("metadata").(map[string]interface{}) metaData := ExpandMetaData(metaDataRaw) - if err := client.UpdateMetaData(ctx, account.ResourceGroup, id.AccountName, id.Name, metaData); err != nil { - return fmt.Errorf("updating MetaData for File Share %q (Storage Account %q): %s", id.Name, id.AccountName, err) + if err = client.UpdateMetaData(ctx, id.Name, metaData); err != nil { + return fmt.Errorf("updating MetaData for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the MetaData for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updated the MetaData for %s", id) } if d.HasChange("acl") { - log.Printf("[DEBUG] Updating the ACL's for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updating the ACLs for %s", id) aclsRaw := d.Get("acl").(*pluginsdk.Set).List() acls := expandStorageShareACLs(aclsRaw) - if err := client.UpdateACLs(ctx, account.ResourceGroup, id.AccountName, id.Name, acls); err != nil { - return fmt.Errorf("updating ACL's for File Share %q (Storage Account %q): %s", id.Name, id.AccountName, err) + if err = client.UpdateACLs(ctx, id.Name, shares.SetAclInput{SignedIdentifiers: acls}); err != nil { + return fmt.Errorf("updating ACLs for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the ACL's for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updated ACLs for %s", id) } if d.HasChange("access_tier") { - log.Printf("[DEBUG] Updating the Access Tier for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updating Access Tier for %s", id) tier := shares.AccessTier(d.Get("access_tier").(string)) - if err := client.UpdateTier(ctx, account.ResourceGroup, id.AccountName, id.Name, tier); err != nil { - return fmt.Errorf("updating Access Tier for File Share %q (Storage Account %q): %s", id.Name, id.AccountName, err) + if err = client.UpdateTier(ctx, id.Name, tier); err != nil { + return fmt.Errorf("updating Access Tier for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the Access Tier for File Share %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updated Access Tier for %s", id) } return resourceStorageShareRead(d, meta) @@ -352,26 +357,27 @@ func resourceStorageShareDelete(d *pluginsdk.ResourceData, meta interface{}) err defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := parse.StorageShareDataPlaneID(d.Id()) + id, err := parse.StorageShareDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Share %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Account %q for Share %q: %v", id.AccountName, 
id.Name, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } - client, err := storageClient.FileSharesClient(ctx, *account) + // The files API does not support bearer tokens (@manicminer, 2024-02-15) + client, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth()) if err != nil { - return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %v", id.AccountName, account.ResourceGroup, err) } - if err := client.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { - return fmt.Errorf("deleting File Share %q (Storage Account %q / Resource Group %q): %s", id.Name, id.AccountName, account.ResourceGroup, err) + if err = client.Delete(ctx, id.Name); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil diff --git a/internal/services/storage/storage_share_resource_test.go b/internal/services/storage/storage_share_resource_test.go index aec5733e28d29..f9ae4c88cdf17 100644 --- a/internal/services/storage/storage_share_resource_test.go +++ b/internal/services/storage/storage_share_resource_test.go @@ -271,7 +271,7 @@ func TestAccStorageShare_protocolUpdate(t *testing.T) { } func (r StorageShareResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageShareDataPlaneID(state.ID) + id, err := parse.StorageShareDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -284,12 +284,12 @@ func (r StorageShareResource) Exists(ctx context.Context, client *clients.Client return nil, fmt.Errorf("unable to determine Account %q for Storage Share %q", id.AccountName, id.Name) } - sharesClient, err := client.Storage.FileSharesClient(ctx, *account) + sharesClient, err := client.Storage.FileSharesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building File Share Client for Storage Account %q (Resource Group %q): %+v", id.AccountName, account.ResourceGroup, err) } - props, err := sharesClient.Get(ctx, account.ResourceGroup, id.AccountName, id.Name) + props, err := sharesClient.Get(ctx, id.Name) if err != nil { return nil, fmt.Errorf("retrieving File Share %q (Account %q / Resource Group %q): %+v", id.Name, id.AccountName, account.ResourceGroup, err) } @@ -297,7 +297,7 @@ func (r StorageShareResource) Exists(ctx context.Context, client *clients.Client } func (r StorageShareResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageShareDataPlaneID(state.ID) + id, err := parse.StorageShareDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -310,11 +310,11 @@ func (r StorageShareResource) Destroy(ctx context.Context, client *clients.Clien return nil, fmt.Errorf("unable to determine Account %q for Storage Share %q", id.AccountName, id.Name) } - sharesClient, err := client.Storage.FileSharesClient(ctx, *account) + sharesClient, err := client.Storage.FileSharesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building File Share 
Client for Storage Account %q (Resource Group %q): %+v", id.AccountName, account.ResourceGroup, err) } - if err := sharesClient.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { + if err := sharesClient.Delete(ctx, id.Name); err != nil { return nil, fmt.Errorf("deleting File Share %q (Account %q / Resource Group %q): %+v", id.Name, id.AccountName, account.ResourceGroup, err) } return utils.Bool(true), nil @@ -537,6 +537,7 @@ func (r StorageShareResource) accessTierStandard(data acceptance.TestData, tier return fmt.Sprintf(` provider "azurerm" { features {} + storage_use_azuread = true } resource "azurerm_resource_group" "test" { diff --git a/internal/services/storage/storage_table_entity_data_source.go b/internal/services/storage/storage_table_entity_data_source.go index 8cda4c4950f05..75ca7869a1233 100644 --- a/internal/services/storage/storage_table_entity_data_source.go +++ b/internal/services/storage/storage_table_entity_data_source.go @@ -12,7 +12,8 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/entities" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/entities" ) func dataSourceStorageTableEntity() *pluginsdk.Resource { @@ -64,25 +65,30 @@ func dataSourceStorageTableEntityRead(d *pluginsdk.ResourceData, meta interface{ defer cancel() storageClient := meta.(*clients.Client).Storage - storageAccountName := d.Get("storage_account_name").(string) + accountName := d.Get("storage_account_name").(string) tableName := d.Get("table_name").(string) partitionKey := d.Get("partition_key").(string) rowKey := d.Get("row_key").(string) - account, err := storageClient.FindAccount(ctx, storageAccountName) + account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", storageAccountName, tableName, err) + return fmt.Errorf("retrieving Account %q for Table %q: %v", accountName, tableName, err) } if account == nil { - return fmt.Errorf("the parent Storage Account %s was not found", storageAccountName) + return fmt.Errorf("the parent Storage Account %s was not found", accountName) } - client, err := storageClient.TableEntityClient(ctx, *account) + client, err := storageClient.TableEntityDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Table Entity Client for Storage Account %q (Resource Group %q): %s", storageAccountName, account.ResourceGroup, err) + return fmt.Errorf("building Table Entity Client for Storage Account %q (Resource Group %q): %v", accountName, account.ResourceGroup, err) } - id := client.GetResourceID(storageAccountName, tableName, partitionKey, rowKey) + accountId, err := accounts.ParseAccountID(accountName, storageClient.StorageDomainSuffix) + if err != nil { + return fmt.Errorf("parsing Account ID: %v", err) + } + + id := entities.NewEntityID(*accountId, tableName, partitionKey, rowKey) input := entities.GetEntityInput{ PartitionKey: partitionKey, @@ -90,19 +96,21 @@ func dataSourceStorageTableEntityRead(d *pluginsdk.ResourceData, meta interface{ MetaDataLevel: entities.NoMetaData, } - result, err := client.Get(ctx, storageAccountName, tableName, input) + result, err := 
client.Get(ctx, tableName, input) if err != nil { - return fmt.Errorf("retrieving Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %s", partitionKey, rowKey, tableName, storageAccountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } - d.Set("storage_account_name", storageAccountName) + d.Set("storage_account_name", accountName) d.Set("table_name", tableName) d.Set("partition_key", partitionKey) d.Set("row_key", rowKey) - if err := d.Set("entity", flattenEntity(result.Entity)); err != nil { - return fmt.Errorf("setting `entity` for Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %s", partitionKey, rowKey, tableName, storageAccountName, account.ResourceGroup, err) + + if err = d.Set("entity", flattenEntity(result.Entity)); err != nil { + return fmt.Errorf("setting `entity` for %s: %v", id, err) } - d.SetId(id) + + d.SetId(id.ID()) return nil } diff --git a/internal/services/storage/storage_table_entity_resource.go b/internal/services/storage/storage_table_entity_resource.go index ae5d8e3345776..97a7a85c8f5e8 100644 --- a/internal/services/storage/storage_table_entity_resource.go +++ b/internal/services/storage/storage_table_entity_resource.go @@ -9,14 +9,16 @@ import ( "strings" "time" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/entities" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/entities" ) func resourceStorageTableEntity() *pluginsdk.Resource { @@ -26,8 +28,8 @@ func resourceStorageTableEntity() *pluginsdk.Resource { Update: resourceStorageTableEntityCreateUpdate, Delete: resourceStorageTableEntityDelete, - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := entities.ParseResourceID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := entities.ParseEntityID(id, storageDomainSuffix) return err }), @@ -87,21 +89,28 @@ func resourceStorageTableEntityCreateUpdate(d *pluginsdk.ResourceData, meta inte account, err := storageClient.FindAccount(ctx, accountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", accountName, tableName, err) + return fmt.Errorf("retrieving Account %q for Table %q: %v", accountName, tableName, err) } if account == nil { if d.IsNewResource() { - return fmt.Errorf("Unable to locate Account %q for Storage Table %q", accountName, tableName) + return fmt.Errorf("locating Storage Account %q for Table %q", accountName, tableName) } else { - log.Printf("[DEBUG] Unable to locate Account %q for Storage Table %q - assuming removed & removing from state", accountName, tableName) + log.Printf("[DEBUG] Unable to locate Storage Account %q for Table %q - assuming removed & removing from state", accountName, 
tableName) d.SetId("") return nil } } - client, err := storageClient.TableEntityClient(ctx, *account) + accountId, err := accounts.ParseAccountID(accountName, storageClient.StorageDomainSuffix) if err != nil { - return fmt.Errorf("building Entity Client: %s", err) + return fmt.Errorf("parsing Account ID: %s", err) + } + + id := entities.NewEntityID(*accountId, tableName, partitionKey, rowKey) + + client, err := storageClient.TableEntityDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) + if err != nil { + return fmt.Errorf("building Entity Client: %v", err) } if d.IsNewResource() { @@ -110,16 +119,15 @@ func resourceStorageTableEntityCreateUpdate(d *pluginsdk.ResourceData, meta inte RowKey: rowKey, MetaDataLevel: entities.NoMetaData, } - existing, err := client.Get(ctx, accountName, tableName, input) + existing, err := client.Get(ctx, tableName, input) if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %s", partitionKey, rowKey, tableName, accountName, account.ResourceGroup, err) + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %s: %v", id, err) } } - if !utils.ResponseWasNotFound(existing.Response) { - id := client.GetResourceID(accountName, tableName, partitionKey, rowKey) - return tf.ImportAsExistsError("azurerm_storage_table_entity", id) + if !response.WasNotFound(existing.HttpResponse) { + return tf.ImportAsExistsError("azurerm_storage_table_entity", id.ID()) } } @@ -129,12 +137,11 @@ func resourceStorageTableEntityCreateUpdate(d *pluginsdk.ResourceData, meta inte Entity: entity, } - if _, err := client.InsertOrMerge(ctx, accountName, tableName, input); err != nil { - return fmt.Errorf("creating Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %+v", partitionKey, rowKey, tableName, accountName, account.ResourceGroup, err) + if _, err = client.InsertOrMerge(ctx, tableName, input); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } - resourceID := client.GetResourceID(accountName, tableName, partitionKey, rowKey) - d.SetId(resourceID) + d.SetId(id.ID()) return resourceStorageTableEntityRead(d, meta) } @@ -144,24 +151,24 @@ func resourceStorageTableEntityRead(d *pluginsdk.ResourceData, meta interface{}) defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := entities.ParseResourceID(d.Id()) + id, err := entities.ParseEntityID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", id.AccountName, id.TableName, err) + return fmt.Errorf("retrieving Account %q for Table %q: %s", id.AccountId.AccountName, id.TableName, err) } if account == nil { - log.Printf("[WARN] Unable to determine Resource Group for Storage Table %q (Account %s) - assuming removed & removing from state", id.TableName, id.AccountName) + log.Printf("[WARN] Unable to determine Resource Group for Storage Table %q (Account %s) - assuming removed & removing from state", id.TableName, id.AccountId.AccountName) d.SetId("") return nil } - client, err := storageClient.TableEntityClient(ctx, *account) + client, err := storageClient.TableEntityDataPlaneClient(ctx, *account, 
storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Table Entity Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building Table Entity Client for Storage Account %q (Resource Group %q): %s", id.AccountId.AccountName, account.ResourceGroup, err) } input := entities.GetEntityInput{ @@ -170,17 +177,18 @@ func resourceStorageTableEntityRead(d *pluginsdk.ResourceData, meta interface{}) MetaDataLevel: entities.FullMetaData, } - result, err := client.Get(ctx, id.AccountName, id.TableName, input) + result, err := client.Get(ctx, id.TableName, input) if err != nil { - return fmt.Errorf("retrieving Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %s", id.PartitionKey, id.RowKey, id.TableName, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } - d.Set("storage_account_name", id.AccountName) + d.Set("storage_account_name", id.AccountId.AccountName) d.Set("table_name", id.TableName) d.Set("partition_key", id.PartitionKey) d.Set("row_key", id.RowKey) - if err := d.Set("entity", flattenEntity(result.Entity)); err != nil { - return fmt.Errorf("setting `entity` for Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %s", id.PartitionKey, id.RowKey, id.TableName, id.AccountName, account.ResourceGroup, err) + + if err = d.Set("entity", flattenEntity(result.Entity)); err != nil { + return fmt.Errorf("setting `entity` for %s: %v", id, err) } return nil @@ -191,22 +199,22 @@ func resourceStorageTableEntityDelete(d *pluginsdk.ResourceData, meta interface{ defer cancel() storageClient := meta.(*clients.Client).Storage - id, err := entities.ParseResourceID(d.Id()) + id, err := entities.ParseEntityID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } - account, err := storageClient.FindAccount(ctx, id.AccountName) + account, err := storageClient.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", id.AccountName, id.TableName, err) + return fmt.Errorf("retrieving Storage Account %q for Table %q: %s", id.AccountId.AccountName, id.TableName, err) } if account == nil { - return fmt.Errorf("Storage Account %q was not found!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountId.AccountName) } - client, err := storageClient.TableEntityClient(ctx, *account) + client, err := storageClient.TableEntityDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Entity Client for Storage Account %q (Resource Group %q): %s", id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("building Entity Client for Storage Account %q (Resource Group %q): %s", id.AccountId.AccountName, account.ResourceGroup, err) } input := entities.DeleteEntityInput{ @@ -214,8 +222,8 @@ func resourceStorageTableEntityDelete(d *pluginsdk.ResourceData, meta interface{ RowKey: id.RowKey, } - if _, err := client.Delete(ctx, id.AccountName, id.TableName, input); err != nil { - return fmt.Errorf("deleting Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %s", id.PartitionKey, id.RowKey, id.TableName, id.AccountName, account.ResourceGroup, err) + if _, err = client.Delete(ctx, id.TableName, input); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil 
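The hunks above migrate the table entity resource from `entities.ParseResourceID`/`client.GetResourceID` to domain-suffix-aware IDs. As a rough illustration of that pattern (not part of this change set), here is a minimal standalone Go sketch: the `accounts.ParseAccountID`, `entities.NewEntityID` and `entities.ParseEntityID` calls and the `AccountId`/`TableName`/`PartitionKey`/`RowKey` fields are taken from the diff, while the literal account, table and key values, the public-cloud domain suffix and the `main` wrapper are placeholders.

```go
package main

import (
	"fmt"

	"github.com/tombuildsstuff/giovanni/storage/2023-11-03/blob/accounts"
	"github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/entities"
)

func main() {
	// In the provider this comes from storageClient.StorageDomainSuffix; the
	// public-cloud value is used here purely as an example.
	domainSuffix := "core.windows.net"

	// Build a typed Account ID from the account name and domain suffix,
	// rather than hard-coding an endpoint.
	accountId, err := accounts.ParseAccountID("examplestorageacct", domainSuffix)
	if err != nil {
		panic(err)
	}

	// Compose the entity ID from the account ID plus table, partition and row keys.
	id := entities.NewEntityID(*accountId, "exampletable", "examplePartition", "exampleRow")
	fmt.Println(id.ID())

	// Parsing the ID back now requires the same domain suffix.
	parsed, err := entities.ParseEntityID(id.ID(), domainSuffix)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.AccountId.AccountName, parsed.TableName, parsed.PartitionKey, parsed.RowKey)
}
```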
diff --git a/internal/services/storage/storage_table_entity_resource_test.go b/internal/services/storage/storage_table_entity_resource_test.go index c2edacd39f652..c62f8565dd124 100644 --- a/internal/services/storage/storage_table_entity_resource_test.go +++ b/internal/services/storage/storage_table_entity_resource_test.go @@ -8,12 +8,13 @@ import ( "fmt" "testing" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/entities" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/entities" ) type StorageTableEntityResource struct{} @@ -116,19 +117,19 @@ func TestAccTableEntity_update_typed(t *testing.T) { } func (r StorageTableEntityResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := entities.ParseResourceID(state.ID) + id, err := entities.ParseEntityID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } - account, err := client.Storage.FindAccount(ctx, id.AccountName) + account, err := client.Storage.FindAccount(ctx, id.AccountId.AccountName) if err != nil { - return nil, fmt.Errorf("retrieving Account %q for Table %q: %+v", id.AccountName, id.TableName, err) + return nil, fmt.Errorf("retrieving Account %q for Table %q: %+v", id.AccountId.AccountName, id.TableName, err) } if account == nil { - return nil, fmt.Errorf("storage Account %q was not found", id.AccountName) + return nil, fmt.Errorf("storage Account %q was not found", id.AccountId.AccountName) } - entitiesClient, err := client.Storage.TableEntityClient(ctx, *account) + entitiesClient, err := client.Storage.TableEntityDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Table Entity Client: %+v", err) } @@ -138,12 +139,12 @@ func (r StorageTableEntityResource) Exists(ctx context.Context, client *clients. 
RowKey: id.RowKey, MetaDataLevel: entities.NoMetaData, } - resp, err := entitiesClient.Get(ctx, id.AccountName, id.TableName, input) + resp, err := entitiesClient.Get(ctx, id.TableName, input) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } - return nil, fmt.Errorf("retrieving Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %+v", id.PartitionKey, id.RowKey, id.TableName, id.AccountName, account.ResourceGroup, err) + return nil, fmt.Errorf("retrieving Entity (Partition Key %q / Row Key %q) (Table %q / Storage Account %q / Resource Group %q): %+v", id.PartitionKey, id.RowKey, id.TableName, id.AccountId.AccountName, account.ResourceGroup, err) } return utils.Bool(true), nil } diff --git a/internal/services/storage/storage_table_resource.go b/internal/services/storage/storage_table_resource.go index 21581f05798c7..1773867840e43 100644 --- a/internal/services/storage/storage_table_resource.go +++ b/internal/services/storage/storage_table_resource.go @@ -10,13 +10,14 @@ import ( "github.com/hashicorp/terraform-provider-azurerm/helpers/tf" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/helpers" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/migration" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" "github.com/hashicorp/terraform-provider-azurerm/internal/timeouts" - "github.com/tombuildsstuff/giovanni/storage/2020-08-04/table/tables" + "github.com/tombuildsstuff/giovanni/storage/2023-11-03/table/tables" ) func resourceStorageTable() *pluginsdk.Resource { @@ -26,8 +27,8 @@ func resourceStorageTable() *pluginsdk.Resource { Delete: resourceStorageTableDelete, Update: resourceStorageTableUpdate, - Importer: pluginsdk.ImporterValidatingResourceId(func(id string) error { - _, err := parse.StorageTableDataPlaneID(id) + Importer: helpers.ImporterValidatingStorageResourceId(func(id, storageDomainSuffix string) error { + _, err := parse.StorageTableDataPlaneID(id, storageDomainSuffix) return err }), @@ -114,32 +115,32 @@ func resourceStorageTableCreate(d *pluginsdk.ResourceData, meta interface{}) err return fmt.Errorf("retrieving Account %q for Table %q: %s", accountName, tableName, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q!", accountName) + return fmt.Errorf("locating Storage Account %q", accountName) } - client, err := storageClient.TablesClient(ctx, *account) + client, err := storageClient.TablesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return fmt.Errorf("building Table Client: %s", err) } - id := parse.NewStorageTableDataPlaneId(accountName, storageClient.Environment.StorageEndpointSuffix, tableName).ID() + id := parse.NewStorageTableDataPlaneId(accountName, storageClient.AzureEnvironment.StorageEndpointSuffix, tableName) - exists, err := client.Exists(ctx, account.ResourceGroup, accountName, tableName) + exists, err := client.Exists(ctx, tableName) if err != nil { - return fmt.Errorf("checking for existence of existing Storage Table %q (Account %q / Resource Group %q): %+v", 
tableName, accountName, account.ResourceGroup, err) + return fmt.Errorf("checking for existing %s: %v", id, err) } if exists != nil && *exists { - return tf.ImportAsExistsError("azurerm_storage_table", id) + return tf.ImportAsExistsError("azurerm_storage_table", id.ID()) } - log.Printf("[DEBUG] Creating Table %q in Storage Account %q.", tableName, accountName) - if err := client.Create(ctx, account.ResourceGroup, accountName, tableName); err != nil { - return fmt.Errorf("creating Table %q within Storage Account %q: %s", tableName, accountName, err) + if err = client.Create(ctx, tableName); err != nil { + return fmt.Errorf("creating %s: %v", id, err) } - d.SetId(id) - if err := client.UpdateACLs(ctx, account.ResourceGroup, accountName, tableName, acls); err != nil { - return fmt.Errorf("setting ACL's for Storage Table %q (Account %q / Resource Group %q): %+v", tableName, accountName, account.ResourceGroup, err) + d.SetId(id.ID()) + + if err = client.UpdateACLs(ctx, tableName, acls); err != nil { + return fmt.Errorf("setting ACLs for %s: %v", id, err) } return resourceStorageTableRead(d, meta) @@ -150,14 +151,14 @@ func resourceStorageTableRead(d *pluginsdk.ResourceData, meta interface{}) error ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageTableDataPlaneID(d.Id()) + id, err := parse.StorageTableDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Storage Account %q for Table %q: %v", id.AccountName, id.Name, err) } if account == nil { log.Printf("Unable to determine Resource Group for Storage Storage Table %q (Account %s) - assuming removed & removing from state", id.Name, id.AccountName) @@ -165,31 +166,31 @@ func resourceStorageTableRead(d *pluginsdk.ResourceData, meta interface{}) error return nil } - client, err := storageClient.TablesClient(ctx, *account) + client, err := storageClient.TablesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Table Client: %s", err) + return fmt.Errorf("building Tables Client: %v", err) } - exists, err := client.Exists(ctx, account.ResourceGroup, id.AccountName, id.Name) + exists, err := client.Exists(ctx, id.Name) if err != nil { - return fmt.Errorf("retrieving Table %q (Storage Account %q / Resource Group %q): %s", id.Name, id.AccountName, account.ResourceGroup, err) + return fmt.Errorf("retrieving %s: %v", id, err) } if exists == nil || !*exists { - log.Printf("[DEBUG] Storage Account %q not found, removing table %q from state", id.AccountName, id.Name) + log.Printf("[DEBUG] %s not found, removing from state", id) d.SetId("") return nil } - acls, err := client.GetACLs(ctx, account.ResourceGroup, id.AccountName, id.Name) + acls, err := client.GetACLs(ctx, id.Name) if err != nil { - return fmt.Errorf("retrieving ACL's %q in Storage Account %q: %s", id.Name, id.AccountName, err) + return fmt.Errorf("retrieving ACLs for %s: %v", id, err) } d.Set("name", id.Name) d.Set("storage_account_name", id.AccountName) - if err := d.Set("acl", flattenStorageTableACLs(acls)); err != nil { - return fmt.Errorf("flattening `acl`: %+v", err) + if err = d.Set("acl", flattenStorageTableACLs(acls)); err != nil { + return fmt.Errorf("setting `acl`: %v", err) } return nil @@ -200,27 +201,26 @@ func 
resourceStorageTableDelete(d *pluginsdk.ResourceData, meta interface{}) err ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageTableDataPlaneID(d.Id()) + id, err := parse.StorageTableDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Storage Account %q for Table %q: %v", id.AccountName, id.Name, err) } if account == nil { - return fmt.Errorf("Unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } - client, err := storageClient.TablesClient(ctx, *account) + client, err := storageClient.TablesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Table Client: %s", err) + return fmt.Errorf("building Tables Client: %v", err) } - log.Printf("[INFO] Deleting Table %q in Storage Account %q", id.Name, id.AccountName) - if err := client.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { - return fmt.Errorf("deleting Table %q from Storage Account %q: %s", id.Name, id.AccountName, err) + if err = client.Delete(ctx, id.Name); err != nil { + return fmt.Errorf("deleting %s: %v", id, err) } return nil @@ -231,35 +231,35 @@ func resourceStorageTableUpdate(d *pluginsdk.ResourceData, meta interface{}) err ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() - id, err := parse.StorageTableDataPlaneID(d.Id()) + id, err := parse.StorageTableDataPlaneID(d.Id(), storageClient.StorageDomainSuffix) if err != nil { return err } account, err := storageClient.FindAccount(ctx, id.AccountName) if err != nil { - return fmt.Errorf("retrieving Account %q for Table %q: %s", id.AccountName, id.Name, err) + return fmt.Errorf("retrieving Storage Account %q for Table %q: %v", id.AccountName, id.Name, err) } if account == nil { - return fmt.Errorf("unable to locate Storage Account %q!", id.AccountName) + return fmt.Errorf("locating Storage Account %q", id.AccountName) } - client, err := storageClient.TablesClient(ctx, *account) + client, err := storageClient.TablesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { - return fmt.Errorf("building Table Client: %s", err) + return fmt.Errorf("building Table Client: %v", err) } if d.HasChange("acl") { - log.Printf("[DEBUG] Updating the ACL's for Storage Table %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updating ACLs for %s", id) aclsRaw := d.Get("acl").(*pluginsdk.Set).List() acls := expandStorageTableACLs(aclsRaw) - if err := client.UpdateACLs(ctx, account.ResourceGroup, id.AccountName, id.Name, acls); err != nil { - return fmt.Errorf("updating ACL's for Table %q (Storage Account %q): %s", id.Name, id.AccountName, err) + if err = client.UpdateACLs(ctx, id.Name, acls); err != nil { + return fmt.Errorf("updating ACLs for %s: %v", id, err) } - log.Printf("[DEBUG] Updated the ACL's for Storage Table %q (Storage Account %q)", id.Name, id.AccountName) + log.Printf("[DEBUG] Updated ACLs for %s", id) } return resourceStorageTableRead(d, meta) diff --git a/internal/services/storage/storage_table_resource_test.go b/internal/services/storage/storage_table_resource_test.go index 683ac23e02870..b30f6dc68fe93 100644 
--- a/internal/services/storage/storage_table_resource_test.go +++ b/internal/services/storage/storage_table_resource_test.go @@ -83,7 +83,7 @@ func TestAccStorageTable_acl(t *testing.T) { } func (r StorageTableResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageTableDataPlaneID(state.ID) + id, err := parse.StorageTableDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -94,16 +94,16 @@ func (r StorageTableResource) Exists(ctx context.Context, client *clients.Client if account == nil { return nil, fmt.Errorf("unable to determine Resource Group for Storage Storage Table %q (Account %q)", id.Name, id.AccountName) } - tablesClient, err := client.Storage.TablesClient(ctx, *account) + tablesClient, err := client.Storage.TablesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Table Client: %+v", err) } - return tablesClient.Exists(ctx, account.ResourceGroup, id.AccountName, id.Name) + return tablesClient.Exists(ctx, id.Name) } func (r StorageTableResource) Destroy(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageTableDataPlaneID(state.ID) + id, err := parse.StorageTableDataPlaneID(state.ID, client.Storage.StorageDomainSuffix) if err != nil { return nil, err } @@ -114,19 +114,19 @@ func (r StorageTableResource) Destroy(ctx context.Context, client *clients.Clien if account == nil { return nil, fmt.Errorf("unable to determine Resource Group for Storage Storage Table %q (Account %q)", id.Name, id.AccountName) } - tablesClient, err := client.Storage.TablesClient(ctx, *account) + tablesClient, err := client.Storage.TablesDataPlaneClient(ctx, *account, client.Storage.DataPlaneOperationSupportingAnyAuthMethod()) if err != nil { return nil, fmt.Errorf("building Table Client: %+v", err) } - exists, err := tablesClient.Exists(ctx, account.ResourceGroup, id.AccountName, id.Name) + exists, err := tablesClient.Exists(ctx, id.Name) if err != nil { return nil, fmt.Errorf("retrieving Table %q (Account %q): %+v", id.Name, id.AccountName, err) } if exists == nil || !*exists { return nil, fmt.Errorf("table %q doesn't exist in Account %q so it can't be deleted", id.Name, id.AccountName) } - if err := tablesClient.Delete(ctx, account.ResourceGroup, id.AccountName, id.Name); err != nil { + if err := tablesClient.Delete(ctx, id.Name); err != nil { return nil, fmt.Errorf("deleting Table %q (Account %q): %+v", id.Name, id.AccountName, err) } return utils.Bool(true), nil diff --git a/internal/services/storage/validate/storage_blob_properties_default_service_version.go b/internal/services/storage/validate/storage_blob_properties_default_service_version.go index f6f3e8611411d..6852d4bf11772 100644 --- a/internal/services/storage/validate/storage_blob_properties_default_service_version.go +++ b/internal/services/storage/validate/storage_blob_properties_default_service_version.go @@ -39,7 +39,7 @@ func BlobPropertiesDefaultServiceVersion(i interface{}, k string) (warnings []st "2020-02-10", "2020-04-08", "2020-06-12", - "2020-08-04", + "2023-11-03", "2020-10-02", "2020-12-06", "2021-02-12", diff --git a/internal/services/storage/validate/storage_container.go b/internal/services/storage/validate/storage_container.go index 365e785a34953..616293abde83c 100644 --- a/internal/services/storage/validate/storage_container.go +++ 
b/internal/services/storage/validate/storage_container.go @@ -5,6 +5,7 @@ package validate import ( "fmt" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "regexp" "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" @@ -29,16 +30,18 @@ func StorageContainerName(v interface{}, k string) (warnings []string, errors [] return warnings, errors } -func StorageContainerDataPlaneID(input interface{}, key string) (warnings []string, errors []error) { - v, ok := input.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected %q to be a string", key)) - return - } +func StorageContainerDataPlaneIDForDomainSuffix(domainSuffix string) pluginsdk.SchemaValidateFunc { + return func(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } - if _, err := parse.StorageContainerDataPlaneID(v); err != nil { - errors = append(errors, err) - } + if _, err := parse.StorageContainerDataPlaneID(v, domainSuffix); err != nil { + errors = append(errors, err) + } - return + return + } } diff --git a/internal/services/storage/validate/storage_share_name.go b/internal/services/storage/validate/storage_share_name.go index b79e4ee93606f..d9a2a03dcf44e 100644 --- a/internal/services/storage/validate/storage_share_name.go +++ b/internal/services/storage/validate/storage_share_name.go @@ -6,8 +6,6 @@ package validate import ( "fmt" "regexp" - - "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" ) func StorageShareName(v interface{}, k string) (warnings []string, errors []error) { @@ -33,18 +31,3 @@ func StorageShareName(v interface{}, k string) (warnings []string, errors []erro } return warnings, errors } - -func StorageShareID(i interface{}, k string) (warnings []string, errors []error) { - v, ok := i.(string) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %q to be string", k)) - return - } - - if _, err := parse.StorageShareDataPlaneID(v); err != nil { - errors = append(errors, fmt.Errorf("Can not parse %q as a resource id: %v", k, err)) - return - } - - return warnings, errors -} diff --git a/website/docs/d/storage_share.html.markdown b/website/docs/d/storage_share.html.markdown index 20763c76e730f..d4e0bbbdda16e 100644 --- a/website/docs/d/storage_share.html.markdown +++ b/website/docs/d/storage_share.html.markdown @@ -10,6 +10,8 @@ description: |- Use this data source to access information about an existing File Share. +~> **Note on Authentication** Shared Key authentication will always be used for this data source, as AzureAD authentication is not supported by the Storage API for files. + ## Example Usage ```hcl diff --git a/website/docs/r/storage_container.html.markdown b/website/docs/r/storage_container.html.markdown index 33465c9183f42..2136d1f4fdcad 100644 --- a/website/docs/r/storage_container.html.markdown +++ b/website/docs/r/storage_container.html.markdown @@ -47,6 +47,8 @@ The following arguments are supported: * `container_access_type` - (Optional) The Access Level configured for this Container. Possible values are `blob`, `container` or `private`. Defaults to `private`. +~> **Note** When updating `container_access_type` for an existing storage container resource, Shared Key authentication will always be used, as AzureAD authentication is not supported. + * `metadata` - (Optional) A mapping of MetaData for this Container. All metadata keys should be lowercase. 
## Attributes Reference diff --git a/website/docs/r/storage_share.html.markdown b/website/docs/r/storage_share.html.markdown index e79dc16d0f6dd..7fd0544df73de 100644 --- a/website/docs/r/storage_share.html.markdown +++ b/website/docs/r/storage_share.html.markdown @@ -10,7 +10,9 @@ description: |- Manages a File Share within Azure Storage. -~> **Note:** The storage share supports two storage tiers: premium and standard. Standard file shares are created in general purpose (GPv1 or GPv2) storage accounts and premium file shares are created in FileStorage storage accounts. For further information, refer to the section "What storage tiers are supported in Azure Files?" of [documentation](https://docs.microsoft.com/azure/storage/files/storage-files-faq#general). +~> **Note** The storage share supports two storage tiers: premium and standard. Standard file shares are created in general purpose (GPv1 or GPv2) storage accounts and premium file shares are created in FileStorage storage accounts. For further information, refer to the section "What storage tiers are supported in Azure Files?" of [documentation](https://docs.microsoft.com/azure/storage/files/storage-files-faq#general). + +~> **Note on Authentication** Shared Key authentication will always be used for this resource, as AzureAD authentication is not supported by the Storage API for files. ## Example Usage diff --git a/website/docs/r/storage_share_directory.html.markdown b/website/docs/r/storage_share_directory.html.markdown index e758c74abe461..8cba5456992a8 100644 --- a/website/docs/r/storage_share_directory.html.markdown +++ b/website/docs/r/storage_share_directory.html.markdown @@ -33,9 +33,8 @@ resource "azurerm_storage_share" "example" { } resource "azurerm_storage_share_directory" "example" { - name = "example" - share_name = azurerm_storage_share.example.name - storage_account_name = azurerm_storage_account.example.name + name = "example" + storage_share_id = azurerm_storage_share.example.id } ``` @@ -45,9 +44,7 @@ The following arguments are supported: * `name` - (Required) The name (or path) of the Directory that should be created within this File Share. Changing this forces a new resource to be created. -* `share_name` - (Required) The name of the File Share where this Directory should be created. Changing this forces a new resource to be created. - -* `storage_account_name` - (Required) The name of the Storage Account within which the File Share is located. Changing this forces a new resource to be created. +* `storage_share_id` - (Required) The ID of the File Share in which this Directory should be created. Changing this forces a new resource to be created. * `metadata` - (Optional) A mapping of metadata to assign to this Directory.
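The authentication notes added to the documentation above mirror how the resources in this change build their data-plane clients. The following hedged Go sketch shows that pattern side by side; `FindAccount`, `FileSharesDataPlaneClient`, `TablesDataPlaneClient`, `DataPlaneOperationSupportingOnlySharedKeyAuth` and `DataPlaneOperationSupportingAnyAuthMethod` are the helpers used in the hunks above, while the wrapper function, its package placement and the error wording are illustrative only.

```go
package storage

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
)

// buildExampleDataPlaneClients is a hypothetical helper showing how the auth
// constraint is chosen per data-plane service in this change set.
func buildExampleDataPlaneClients(ctx context.Context, meta interface{}, accountName string) error {
	storageClient := meta.(*clients.Client).Storage

	account, err := storageClient.FindAccount(ctx, accountName)
	if err != nil {
		return fmt.Errorf("retrieving Account %q: %v", accountName, err)
	}
	if account == nil {
		return fmt.Errorf("locating Storage Account %q", accountName)
	}

	// Files: the data-plane API does not accept bearer tokens, so the client is
	// restricted to Shared Key authentication.
	sharesClient, err := storageClient.FileSharesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingOnlySharedKeyAuth())
	if err != nil {
		return fmt.Errorf("building File Shares Client: %v", err)
	}

	// Tables: Shared Key or AzureAD (when `storage_use_azuread = true`) both
	// work, so any supported auth method is allowed.
	tablesClient, err := storageClient.TablesDataPlaneClient(ctx, *account, storageClient.DataPlaneOperationSupportingAnyAuthMethod())
	if err != nil {
		return fmt.Errorf("building Tables Client: %v", err)
	}

	_, _ = sharesClient, tablesClient
	return nil
}
```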