Skip to content

Commit

Permalink
Merge pull request #3110 from Lucretius/dfs-web-endpoints
Browse files Browse the repository at this point in the history
[Storage Account] dfs, web endpoints, file secondary endpoint
  • Loading branch information
tombuildsstuff authored Mar 29, 2019
2 parents 0720f45 + de05e0d commit fefaddc
Show file tree
Hide file tree
Showing 4 changed files with 187 additions and 96 deletions.
51 changes: 50 additions & 1 deletion azurerm/data_source_storage_account.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,46 @@ func dataSourceArmStorageAccount() *schema.Resource {
Computed: true,
},

// NOTE: The API does not appear to expose a secondary file endpoint
"primary_web_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"primary_web_host": {
Type: schema.TypeString,
Computed: true,
},

"secondary_web_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"secondary_web_host": {
Type: schema.TypeString,
Computed: true,
},

"primary_dfs_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"primary_dfs_host": {
Type: schema.TypeString,
Computed: true,
},

"secondary_dfs_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"secondary_dfs_host": {
Type: schema.TypeString,
Computed: true,
},

"primary_file_endpoint": {
Type: schema.TypeString,
Computed: true,
Expand All @@ -162,6 +201,16 @@ func dataSourceArmStorageAccount() *schema.Resource {
Computed: true,
},

"secondary_file_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"secondary_file_host": {
Type: schema.TypeString,
Computed: true,
},

"primary_access_key": {
Type: schema.TypeString,
Computed: true,
Expand Down
192 changes: 97 additions & 95 deletions azurerm/resource_arm_storage_account.go
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,46 @@ func resourceArmStorageAccount() *schema.Resource {
Computed: true,
},

// NOTE: The API does not appear to expose a secondary file endpoint
"primary_web_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"primary_web_host": {
Type: schema.TypeString,
Computed: true,
},

"secondary_web_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"secondary_web_host": {
Type: schema.TypeString,
Computed: true,
},

"primary_dfs_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"primary_dfs_host": {
Type: schema.TypeString,
Computed: true,
},

"secondary_dfs_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"secondary_dfs_host": {
Type: schema.TypeString,
Computed: true,
},

"primary_file_endpoint": {
Type: schema.TypeString,
Computed: true,
Expand All @@ -270,6 +309,16 @@ func resourceArmStorageAccount() *schema.Resource {
Computed: true,
},

"secondary_file_endpoint": {
Type: schema.TypeString,
Computed: true,
},

"secondary_file_host": {
Type: schema.TypeString,
Computed: true,
},

"primary_access_key": {
Type: schema.TypeString,
Sensitive: true,
Expand Down Expand Up @@ -1033,118 +1082,71 @@ func getBlobConnectionString(blobEndpoint *string, acctName *string, acctKey *st
}

func flattenAndSetAzureRmStorageAccountPrimaryEndpoints(d *schema.ResourceData, primary *storage.Endpoints) error {
var blobEndpoint, blobHost string
if primary != nil {
if v := primary.Blob; v != nil {
blobEndpoint = *v

u, err := url.Parse(*v)
if err != nil {
return fmt.Errorf("invalid blob endpoint for parsing: %q", *v)
}
blobHost = u.Host
}
if primary == nil {
return fmt.Errorf("primary endpoints should not be empty")
}
d.Set("primary_blob_endpoint", blobEndpoint)
d.Set("primary_blob_host", blobHost)

var queueEndpoint, queueHost string
if primary != nil {
if v := primary.Queue; v != nil {
queueEndpoint = *v

u, err := url.Parse(*v)
if err != nil {
return fmt.Errorf("invalid queue endpoint for parsing: %q", *v)
}
queueHost = u.Host
}
if err := setEndpointAndHost(d, "primary", primary.Blob, "blob"); err != nil {
return err
}
d.Set("primary_queue_endpoint", queueEndpoint)
d.Set("primary_queue_host", queueHost)

var tableEndpoint, tableHost string
if primary != nil {
if v := primary.Table; v != nil {
tableEndpoint = *v

u, err := url.Parse(*v)
if err != nil {
return fmt.Errorf("invalid table endpoint for parsing: %q", *v)
}
tableHost = u.Host
}
if err := setEndpointAndHost(d, "primary", primary.Dfs, "dfs"); err != nil {
return err
}
d.Set("primary_table_endpoint", tableEndpoint)
d.Set("primary_table_host", tableHost)

var fileEndpoint, fileHost string
if primary != nil {
if v := primary.File; v != nil {
fileEndpoint = *v

u, err := url.Parse(*v)
if err != nil {
return fmt.Errorf("invalid file endpoint for parsing: %q", *v)
}
fileHost = u.Host
}
if err := setEndpointAndHost(d, "primary", primary.File, "file"); err != nil {
return err
}
d.Set("primary_file_endpoint", fileEndpoint)
d.Set("primary_file_host", fileHost)

if primary == nil {
return fmt.Errorf("primary endpoints should not be empty")
if err := setEndpointAndHost(d, "primary", primary.Queue, "queue"); err != nil {
return err
}
if err := setEndpointAndHost(d, "primary", primary.Table, "table"); err != nil {
return err
}
if err := setEndpointAndHost(d, "primary", primary.Web, "web"); err != nil {
return err
}

return nil
}

func flattenAndSetAzureRmStorageAccountSecondaryEndpoints(d *schema.ResourceData, secondary *storage.Endpoints) error {
var blobEndpoint, blobHost string
if secondary != nil {
if v := secondary.Blob; v != nil {
blobEndpoint = *v

if u, err := url.Parse(*v); err == nil {
blobHost = u.Host
} else {
return fmt.Errorf("invalid blob endpoint for parsing: %q", *v)
}
}
if secondary == nil {
return nil
}
d.Set("secondary_blob_endpoint", blobEndpoint)
d.Set("secondary_blob_host", blobHost)

var queueEndpoint, queueHost string
if secondary != nil {
if v := secondary.Queue; v != nil {
queueEndpoint = *v

u, err := url.Parse(*v)
if err != nil {
return fmt.Errorf("invalid queue endpoint for parsing: %q", *v)
}
queueHost = u.Host
}
if err := setEndpointAndHost(d, "secondary", secondary.Blob, "blob"); err != nil {
return err
}
if err := setEndpointAndHost(d, "secondary", secondary.Dfs, "dfs"); err != nil {
return err
}
if err := setEndpointAndHost(d, "secondary", secondary.File, "file"); err != nil {
return err
}
d.Set("secondary_queue_endpoint", queueEndpoint)
d.Set("secondary_queue_host", queueHost)
if err := setEndpointAndHost(d, "secondary", secondary.Queue, "queue"); err != nil {
return err
}
if err := setEndpointAndHost(d, "secondary", secondary.Table, "table"); err != nil {
return err
}
if err := setEndpointAndHost(d, "secondary", secondary.Web, "web"); err != nil {
return err
}
return nil
}

var tableEndpoint, tableHost string
if secondary != nil {
if v := secondary.Table; v != nil {
tableEndpoint = *v
// setEndpointAndHost flattens a single service endpoint into the pair of
// schema attributes "<ordinal>_<type>_endpoint" and "<ordinal>_<type>_host",
// e.g. ordinalString="primary", typeString="blob" populates
// `primary_blob_endpoint` and `primary_blob_host`.
//
// A nil endpoint is flattened to empty strings for both attributes. When an
// endpoint is present, the host is derived by parsing it as a URL; an
// unparsable endpoint returns an error.
func setEndpointAndHost(d *schema.ResourceData, ordinalString string, endpointType *string, typeString string) error {
	var endpoint, host string
	if v := endpointType; v != nil {
		endpoint = *v

		u, err := url.Parse(*v)
		if err != nil {
			return fmt.Errorf("invalid %s endpoint for parsing: %q", typeString, *v)
		}
		host = u.Host
	}

	d.Set(fmt.Sprintf("%s_%s_endpoint", ordinalString, typeString), endpoint)
	d.Set(fmt.Sprintf("%s_%s_host", ordinalString, typeString), host)
	return nil
}
20 changes: 20 additions & 0 deletions website/docs/d/storage_account.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,26 @@ output "storage_account_tier" {

* `primary_file_host` - The hostname with port if applicable for file storage in the primary location.

* `secondary_file_endpoint` - The endpoint URL for file storage in the secondary location.

* `secondary_file_host` - The hostname with port if applicable for file storage in the secondary location.

* `primary_dfs_endpoint` - The endpoint URL for DFS (Data Lake Storage Gen2) storage in the primary location.

* `primary_dfs_host` - The hostname with port if applicable for DFS (Data Lake Storage Gen2) storage in the primary location.

* `secondary_dfs_endpoint` - The endpoint URL for DFS (Data Lake Storage Gen2) storage in the secondary location.

* `secondary_dfs_host` - The hostname with port if applicable for DFS (Data Lake Storage Gen2) storage in the secondary location.

* `primary_web_endpoint` - The endpoint URL for web (static website) storage in the primary location.

* `primary_web_host` - The hostname with port if applicable for web (static website) storage in the primary location.

* `secondary_web_endpoint` - The endpoint URL for web (static website) storage in the secondary location.

* `secondary_web_host` - The hostname with port if applicable for web (static website) storage in the secondary location.

* `primary_access_key` - The primary access key for the Storage Account.

* `secondary_access_key` - The secondary access key for the Storage Account.
Expand Down
20 changes: 20 additions & 0 deletions website/docs/r/storage_account.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,26 @@ The following attributes are exported in addition to the arguments listed above:

* `primary_file_host` - The hostname with port if applicable for file storage in the primary location.

* `secondary_file_endpoint` - The endpoint URL for file storage in the secondary location.

* `secondary_file_host` - The hostname with port if applicable for file storage in the secondary location.

* `primary_dfs_endpoint` - The endpoint URL for DFS (Data Lake Storage Gen2) storage in the primary location.

* `primary_dfs_host` - The hostname with port if applicable for DFS (Data Lake Storage Gen2) storage in the primary location.

* `secondary_dfs_endpoint` - The endpoint URL for DFS (Data Lake Storage Gen2) storage in the secondary location.

* `secondary_dfs_host` - The hostname with port if applicable for DFS (Data Lake Storage Gen2) storage in the secondary location.

* `primary_web_endpoint` - The endpoint URL for web (static website) storage in the primary location.

* `primary_web_host` - The hostname with port if applicable for web (static website) storage in the primary location.

* `secondary_web_endpoint` - The endpoint URL for web (static website) storage in the secondary location.

* `secondary_web_host` - The hostname with port if applicable for web (static website) storage in the secondary location.

* `primary_access_key` - The primary access key for the storage account.

* `secondary_access_key` - The secondary access key for the storage account.
Expand Down

0 comments on commit fefaddc

Please sign in to comment.